query (stringlengths 9 to 9.05k) | document (stringlengths 10 to 222k) | metadata (dict) | negatives (listlengths 30) | negative_scores (listlengths 30) | document_score (stringlengths 4 to 10) | document_rank (stringclasses 2 values)
---|---|---|---|---|---|---|
Verify that the receiver correctly handles successful multihop jobs | def test_multihop_receiver_on_success(vo, did_factory, root_account, caches_mock, metrics_mock):
    receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'all_vos': True, 'total_threads': 1})
    receiver_thread.start()
    try:
        src_rse = 'XRD1'
        src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
        jump_rse = 'XRD3'
        jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)
        dst_rse = 'XRD4'
        dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)
        all_rses = [src_rse_id, jump_rse_id, dst_rse_id]
        did = did_factory.upload_test_file(src_rse)
        rule_priority = 5
        rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None, priority=rule_priority)
        submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)
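        # Both hops should be marked DONE by the receiver alone: the waits below do not run the poller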
        request = __wait_for_state_transition(dst_rse_id=jump_rse_id, run_poller=False, **did)
        assert request['state'] == RequestState.DONE
        request = __wait_for_state_transition(dst_rse_id=dst_rse_id, run_poller=False, **did)
        assert request['state'] == RequestState.DONE
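        # Query FTS directly to verify that the rule priority was propagated to the submitted job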
        fts_response = FTS3Transfertool(external_host=TEST_FTS_HOST).bulk_query({request['external_id']: {request['id']: request}})
        assert fts_response[request['external_id']][request['id']].job_response['priority'] == rule_priority
        # Two hops; both handled by receiver
        assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 2
    finally:
        receiver_graceful_stop.set()
        receiver_thread.join(timeout=5)
        receiver_graceful_stop.clear() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_multihop_receiver_on_failure(vo, did_factory, replica_client, root_account, caches_mock, metrics_mock):\n receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'all_vos': True, 'total_threads': 1})\n receiver_thread.start()\n\n try:\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n # Register a did which doesn't exist. It will trigger a failure error during the FTS transfer.\n did = did_factory.random_file_did()\n replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])\n\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=jump_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.FAILED\n assert 'Unused hop in multi-hop' in request['err_msg']\n\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 1\n\n # Finisher will handle transfers of the same multihop one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n # The intermediate request must not be re-scheduled by finisher\n with pytest.raises(RequestNotFound):\n request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # ensure tha the ranking was correctly decreased for the whole path\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1\n assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == -1\n assert request['state'] == RequestState.QUEUED\n finally:\n receiver_graceful_stop.set()\n receiver_thread.join(timeout=5)\n receiver_graceful_stop.clear()",
"def test_redelivery_of_rejected_messages(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = '#67-%s' % randint(10, 99)\n localConfig.submit_sm_throughput = 3\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # Send 60 messages to the queue\n startAt = datetime.now()\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 60:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n counter = 0\n _receivedSubmitsCount = 0\n # Wait for 40 seconds before checking if all submits were delivered\n # It will check for throughput in each iteration\n while counter < 30:\n receivedSubmits = self.SMSCPort.factory.lastClient.submitRecords\n\n _receivedSubmitsCount = len(receivedSubmits)\n\n # Wait some time\n yield waitFor(1)\n\n counter += 1\n endAt = datetime.now()\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(2)\n\n # Assertions\n # Take the lastClient (and unique one) and assert received message\n self.assertEqual(len(self.SMSCPort.factory.lastClient.submitRecords), 60)",
"def test_redelivery_of_rejected_messages_after_restart(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = '#67-%s' % randint(10, 9999)\n localConfig.requeue_delay = 1\n localConfig.submit_sm_throughput = 1\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # Send 4 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 4:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n msgid = yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 5 seconds before stopping\n yield waitFor(5)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(5)\n\n # Save the count before starting the connector\n _submitRecordsCount = len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Wait for 5 seconds before starting again\n yield waitFor(5)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 10 seconds before stopping , all the rest of the queue must be sent\n yield waitFor(10)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(10)\n\n # Update the counter\n _submitRecordsCount += len(self.SMSCPort.factory.lastClient.submitRecords)\n\n # Assertions\n self.assertEqual(_submitRecordsCount, 4)",
"def final_send_message_validation(self):\n stats = self.transport.get_stats()\n randomdrops = stats.randomdrops\n forcedrops = stats.forcedrops\n mapsize = len(self.msgmap)\n msg = \"Final verification ForceDrops: %d RandomDrops: %d MISSING: %d\"\n _LOGGER.info(msg, forcedrops, randomdrops, (mapsize - randomdrops))\n\n if randomdrops != mapsize:\n # We will fail this test later, but do some logging here...\n _LOGGER.info(\"Drop queue size: %d\", len(self.droppedmsgs))\n\n if _TRACE.enabled():\n def logmr(id_, mrec):\n _TRACE(\"missing mr id: %s drop: %d\", id_, mrec.alwaysdrop)\n self.msgmap.process_all(logmr)\n\n self.harness.assertEqual(randomdrops, mapsize)",
"def test_job_failure(app):\n with worker(app):\n state = wait_for_results(app, length=100, sleep=0.2, maxwait=4)\n\n # Tasks have been delivered and executed.\n assert set(r.return_value for r in all_results(app)) == set(range(100))\n assert len(state.queue.messages) == 0\n\n # Consumer groups behaved properly.\n assert state.queue.info.groups == 1\n assert state.queue.groups[0].pending == 0\n\n # Nothing in the DLQ.\n assert len(state.dead.messages) == 0\n\n # Any scheduled tasks completed and removed.\n assert len(state.schedule) == 0",
"def _check_for_finished_job(self):\n raise NotImplementedError",
"def _check_results(self):\n if not 'EXECUTION OF GAMESS TERMINATED NORMALLY' in self.file_dic['output']:\n print self.job_name + \" didn't finish\"\n raise TypeError('Calculation didn\\'t finish')",
"def test_results_workers(self, affiliate_items):\n success_count = 0\n updater = mock.Mock()\n\n few_workers = BatchJob(affiliate_items, updater, workers=1)\n for result in few_workers.run():\n success_count += int(not result.is_error)\n\n many_workers = BatchJob(affiliate_items, updater, workers=4)\n for result in many_workers.run():\n success_count += int(not result.is_error)\n\n assert success_count == 8\n assert updater.call_count == 8",
"def handleMsgs(self):\n\n force_sheep_check = self.changed_last_step\n self.changed_last_step = False\n if not self.queue:\n return\n\n need_to_check = False\n for msg in self.popMsg(): # Receive message(s) from queue.\n if msg.type == Type.BLOCK:\n new_tx = msg.content\n if new_tx.hash in self.seen_tx:\n continue\n need_to_check = True\n self.changed_last_step = True\n self.handleNewTx(new_tx, msg.sender)\n elif msg.type == Type.REQUEST: # Requests are issued by other miners.\n target_hash = msg.content\n assert target_hash in self.seen_tx # I should never get a request for a tx I haven't seen.\n requestedTx = self.seen_tx[target_hash]\n self.sendMsg(msg.sender, Message(self.id, Type.BLOCK, requestedTx))\n if need_to_check or (self.hasSheep() and force_sheep_check): # Have to check every time if has sheep.\n self.checkAllTx()",
"def test_delivery_of_queued_messages(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = str(randint(10, 99))\n localConfig.requeue_delay = 2\n localConfig.submit_sm_throughput = 20\n yield self.add(localConfig)\n\n # Send 150 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 150:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 20 seconds\n yield waitFor(20)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 30 seconds, all the rest of the queue must be sent\n yield waitFor(50)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(20)\n\n # Assertions\n # Take the lastClient (and unique one) and assert received message\n self.assertEqual(len(self.SMSCPort.factory.lastClient.submitRecords), 150)",
"def check_jobs(self):\n # New/aborted jobs\n try:\n jobs = self.sm.get_job('%', phase = 'QUEUED')\n for job in jobs:\n self._launch_job(Job(job['job']))\n res = self.sm.get_aborted_jobs()\n aborts = [x['identifier'] for x in res]\n # Completed jobs\n for t in self.threads:\n if t.isDone() or t.name in aborts:\n self.threads.remove(t)\n # Set job status to COMPLETED\n job = Job(self.sm.get_job(t.name)[0]['job'])\n if t._Future__excpt == None:\n job.set_phase('COMPLETED')\n if t._Future__result != None:\n job.set_results(t._Future__result) \n status = True\n else:\n job.set_phase('ERROR')\n job.set_error_summary(str(t._Future__excpt[1]).replace(\"'\", \"\"))\n status = False\n job.set_end_time(datetime.utcnow().isoformat())\n self.sm.update_job(job = job, completed = status)\n except Exception, e:\n print \"Error:\", e",
"def proceed(self):\n pass",
"def test_results_success(self, affiliate_items):\n success_count = 0\n\n updater = mock.Mock()\n batch_job = BatchJob(affiliate_items, updater)\n\n for result in batch_job.run():\n success_count += int(not result.is_error)\n\n assert success_count == 4",
"def test_send_to_grader_fail(self):\r\n\r\n student_response = \"This is a student submission\"\r\n self.mock_xqueue.send_to_queue.return_value = (1, \"Not Queued\")\r\n result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system)\r\n self.assertFalse(result)",
"def test__API_with_wrong_answer(self):\n self.mock_connection.state = MockConnection.WRONG_NUM_OF_CONFIRMATIONS\n\n # timeout supposed to be here\n self.assertEqual(self.mutex.lock(), False) # acquire mutex",
"def verify_as_target(self, message_handler):",
"def check_for_work(self):\n print(\"validator: check for work\")\n self.check_for_analyzers()\n self.check_for_uploads()\n self.check_for_requests()",
"def test_results(self, affiliate_items):\n processed_count = 0\n error_count = 0\n\n updater = mock.Mock()\n batch_job = BatchJob(affiliate_items, updater)\n\n for result in batch_job.run():\n processed_count += 1\n error_count += int(result.is_error)\n\n assert updater.call_count == 4\n assert processed_count == 4\n assert error_count == 0",
"def test_save_send(self):\r\n # Don't really know how to test this effectively...\r\n # Would require to simulate a blocking socket on the recipient side...\r\n pass",
"def handle_upld_init(self, job):\n print(\"User uploaded a file.\")\n if job.data[\"high_reliability\"]:\n # Send the file information to all servers\n self.put_job_in_all_queues(job)\n\n list_job_results = self.get_internal_results_from_all_servers()\n\n if len(list_job_results) == 0:\n # We got no responses back, there are probably no servers active\n self.put_external_result(self.generate_failure_job(\"Unsuccessful, no servers running\"))\n return\n\n # Check all the servers are ready to receive\n for result in list_job_results:\n if result.result[\"outcome\"] != \"ready to receive\":\n self.put_external_result(\n self.generate_failure_job(\"Unsuccessful, one of the servers is not ready\"))\n return\n\n # Tell the client we are ready to receive\n response_result = copy.deepcopy(list_job_results[0])\n response_result.processed_by = None\n self.put_external_result(response_result)\n\n else:\n # Get number of files from each server and upload to the server with the least files\n list_job = Job(\"LIST\")\n self.put_job_in_all_queues(list_job)\n list_job_results = self.get_internal_results_from_all_servers()\n\n # Use the server with the fewest files stored on it\n if len(list_job_results) == 0:\n # We got no responses back, there are probably no servers active\n self.put_external_result(self.generate_failure_job(\"Unsuccessful, no servers running\"))\n return\n result_to_use = list_job_results[0]\n for each_result in list_job_results:\n if len(each_result.result[\"files_list\"]) < len(result_to_use.result[\"files_list\"]):\n result_to_use = each_result\n\n # Upload the file to the chosen server\n self.server_queues[result_to_use.processed_by].put_job(job)\n\n result = self.get_internal_result_from_server(result_to_use.processed_by)\n\n # Assign a token to the ongoing upload job so we can associate the client's UPLD_DATA command\n # with the server we should upload to (while still hiding information about the servers from\n # the client)\n token = self.gen_unused_random_num()\n self.UPLD_TOKEN_DICT[token] = result_to_use.processed_by\n result.processed_by = None\n result.token = token\n\n self.put_external_result(result)",
"def check_done(self):\n if len(self._calls) != 0:\n raise MockException(\"Still expecting more function calls\")",
"def test_async_call(self):\n actors = [Actor.remote(i) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n results = []\n for _ in range(10):\n manager.foreach_actor_async(lambda w: w.call())\n results.extend(manager.fetch_ready_async_reqs(timeout_seconds=None))\n # Wait for actors to recover.\n wait_for_restore()\n\n # Note that we can hardcode the numbers here because of the deterministic\n # lists of random numbers we use.\n # 7 calls succeeded, 4 failed.\n # The number of results back is much lower than 40, because we do not probe\n # the actors with this test. As soon as an actor errors out, it will get\n # taken out of the lineup forever.\n self.assertEqual(len([r for r in results if r.ok]), 7)\n self.assertEqual(len([r for r in results if not r.ok]), 4)\n\n manager.clear()",
"def _check(self) -> None:\n if not self._is_initiated:\n self._is_initiated = True\n\n self._wait_until_executing_orders_are_fully_handled()\n\n if jh.is_live() and jh.is_debugging():\n logger.info(f'Executing {self.name}-{self.exchange}-{self.symbol}-{self.timeframe}')\n\n # should cancel entry?\n if len(self.entry_orders) and self.is_close and self.should_cancel_entry():\n self._execute_cancel()\n\n # make sure order cancellation response is received via WS\n if jh.is_live():\n # sleep a little until cancel is received via WS\n sleep(0.1)\n # just in case, sleep some more if necessary\n for _ in range(20):\n if store.orders.count_active_orders(self.exchange, self.symbol) == 0:\n break\n\n logger.info('sleeping 0.2 more seconds until cancellation is over...')\n sleep(0.2)\n\n # If it's still not cancelled, something is wrong. Handle cancellation failure\n if store.orders.count_active_orders(self.exchange, self.symbol) != 0:\n raise exceptions.ExchangeNotResponding(\n 'The exchange did not respond as expected for order cancellation'\n )\n\n # update position\n if self.position.is_open:\n self._update_position()\n\n # sleep for 1 second if a MARKET order has been submitted but not executed yet (live trading only)\n if jh.is_livetrading():\n waiting_counter = 0\n waiting_seconds = 1\n while self._have_any_pending_market_exit_orders():\n if jh.is_debugging():\n logger.info(f'Waiting {waiting_seconds} second for pending market exit orders to be handled...')\n waiting_counter += 1\n sleep(1)\n if waiting_counter > 10:\n raise exceptions.ExchangeNotResponding(\n 'The exchange did not respond as expected for order execution'\n )\n\n self._simulate_market_order_execution()\n\n # should_long and should_short\n if self.position.is_close and self.entry_orders == []:\n self._reset()\n\n should_short = self.should_short()\n # validate that should_short is not True if the exchange_type is spot\n if self.exchange_type == 'spot' and should_short is True:\n raise exceptions.InvalidStrategy(\n 'should_short cannot be True if the exchange type is \"spot\".'\n )\n\n should_long = self.should_long()\n\n # should_short and should_long cannot be True at the same time\n if should_short and should_long:\n raise exceptions.ConflictingRules(\n 'should_short and should_long should not be true at the same time.'\n )\n\n if should_long:\n self._execute_long()\n elif should_short:\n self._execute_short()",
"def test_error_statuses(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n\n # We will call execute 4 times. It will throw 3 errors and 1 valid\n block._execute_snmp_request = MagicMock(\n side_effect=[SAMPLE_ERROR_SNMP_RESPONSE,\n SAMPLE_ERROR_STATUS_SNMP_RESPONSE,\n SAMPLE_SNMP_RESPONSE,\n Exception])\n block._handle_data = MagicMock()\n\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n self.configure_block(block, {\n \"oids\": [{\"oid\": myOID}]\n })\n block.start()\n\n # Send 4 signals to the block, causing 4 requests to go out\n block.process_signals([Signal({\"sig\": i}) for i in range(4)])\n\n # Execute request should have been called 4 times\n self.assertEqual(block._execute_snmp_request.call_count, 4)\n\n # Handle data should only be called for the valid response\n self.assertEqual(block._handle_data.call_count, 1)\n self.assertEqual(block._handle_data.call_args[0][0], [])\n block.stop()",
"def handle_upld_data(self, job):\n if job.data[\"high_reliability\"]:\n # Upload the file to all servers\n self.put_job_in_all_queues(job)\n\n list_job_results = self.get_internal_results_from_all_servers()\n\n if len(list_job_results) == 0:\n # We got no responses back, there are probably no servers active\n self.put_external_result(self.generate_failure_job(\"Unsuccessful, no servers responded\"))\n return\n\n # Check all the servers had success\n for result in list_job_results:\n if result.result[\"outcome\"] != \"success\":\n self.put_external_result(\n self.generate_failure_job(\"Unsuccessful, one of the servers did not have success\"))\n return\n\n # Tell the client we successfully uploaded to all servers\n response_result = copy.deepcopy(list_job_results[0])\n response_result.processed_by = None\n self.put_external_result(response_result)\n\n else:\n\n # Check we recognise the token\n if job.token not in self.UPLD_TOKEN_DICT:\n print(\"UNRECOGNISED TOKEN: {}\".format(job.token))\n return\n\n # Pass the job onto the server associated with the job token\n server_name = self.UPLD_TOKEN_DICT[job.token]\n self.put_job_in_specific_server_queue(job, server_name)\n\n # Get the result from the server and pass it back to the client\n result = self.get_internal_result_from_server(server_name,\n timeout=30\n if job.data[\"file_size\"] > 2 * 2 ** 20 else 4)\n self.put_external_result(result)",
"def pytest_finished_handling_group(session, worker):",
"def test_create(self):\n assert self.worker.connection is None or self.worker.connection.is_alive()\n # TODO(orlade): Mock this stuff.\n # assert_queue_size({TEST_REQUEST_QUEUE: 0, TEST_RESULT_QUEUE: 0})",
"def test_valid_input_succeeds(self, async_patch, chan_patch):\n self.assertTrue(send_rotate_to_can(self.USER, self.BIN_NUM))\n async_patch.assert_called_once()\n chan_patch.assert_called_once()",
"async def _do_work_claim(self) -> bool:\n # 1. Ask the LTA DB for the next Bundle to be deleted\n # configure a RestClient to talk to the LTA DB\n lta_rc = ClientCredentialsAuth(address=self.lta_rest_url,\n token_url=self.lta_auth_openid_url,\n client_id=self.client_id,\n client_secret=self.client_secret,\n timeout=self.work_timeout_seconds,\n retries=self.work_retries)\n self.logger.info(\"Asking the LTA DB for a Bundle to check for TransferRequest being finished.\")\n pop_body = {\n \"claimant\": f\"{self.name}-{self.instance_uuid}\"\n }\n response = await lta_rc.request('POST', f'/Bundles/actions/pop?source={self.source_site}&dest={self.dest_site}&status={self.input_status}', pop_body)\n self.logger.info(f\"LTA DB responded with: {response}\")\n bundle = response[\"bundle\"]\n if not bundle:\n self.logger.info(\"LTA DB did not provide a Bundle to check. Going on vacation.\")\n return False\n # update the TransferRequest that spawned the Bundle, if necessary\n await self._update_transfer_request(lta_rc, bundle)\n # even if we processed a Bundle, take a break between Bundles\n return False",
"def test_successful(self, mock_create, mock_msg_mgr):\n\n json_data = {\n \"input\" : {\n 'version': '6',\n 'files': {'input_a': [self.source_file.id]},\n 'json': {}\n },\n \"job_type_id\" : self.job_type1.pk\n }\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n result = json.loads(response.content)\n\n #Response should be new v6 job detail response\n self.assertEqual(result['execution'], None)\n self.assertEqual(result['max_tries'], 3)\n self.assertTrue('/%s/jobs/' % self.api in response['location'])\n mock_create.assert_called_once()"
]
| [
"0.6279267",
"0.61867285",
"0.5969139",
"0.5968029",
"0.59320337",
"0.589347",
"0.58139",
"0.5768133",
"0.5751717",
"0.5734699",
"0.57074887",
"0.56824577",
"0.5675357",
"0.56739825",
"0.56538695",
"0.5651469",
"0.56380355",
"0.56136626",
"0.5596965",
"0.5578065",
"0.5570784",
"0.55557674",
"0.5547995",
"0.55383825",
"0.5536843",
"0.5530806",
"0.5522455",
"0.55204636",
"0.5517962",
"0.55143946"
]
| 0.65378636 | 0 |
Integration test of the preparer/throttler workflow. | def test_preparer_throttler_submitter(rse_factory, did_factory, root_account, file_config_mock, core_config_mock, metrics_mock):
    src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
    dst_rse1, dst_rse_id1 = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
    dst_rse2, dst_rse_id2 = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')
    all_rses = [src_rse_id, dst_rse_id1, dst_rse_id2]
    for rse_id in all_rses:
        rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)
    distance_core.add_distance(src_rse_id, dst_rse_id1, distance=10)
    distance_core.add_distance(src_rse_id, dst_rse_id2, distance=10)
    # Set limits only for one of the RSEs
    request_core.set_transfer_limit(dst_rse1, max_transfers=1, activity='all_activities', strategy='fifo')
    did1 = did_factory.upload_test_file(src_rse)
    did2 = did_factory.upload_test_file(src_rse)
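    # Two rules target the limited RSE (dst_rse1), one targets the RSE without limits (dst_rse2)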
    rule_core.add_rule(dids=[did1], account=root_account, copies=1, rse_expression=dst_rse1, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)
    rule_core.add_rule(dids=[did2], account=root_account, copies=1, rse_expression=dst_rse1, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)
    rule_core.add_rule(dids=[did1], account=root_account, copies=1, rse_expression=dst_rse2, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
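    # Newly created requests start in the PREPARING state, to be picked up by the preparer daemon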
    request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did1)
    assert request['state'] == RequestState.PREPARING
    request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did2)
    assert request['state'] == RequestState.PREPARING
    request = request_core.get_request_by_did(rse_id=dst_rse_id2, **did1)
    assert request['state'] == RequestState.PREPARING
    # submitter must not work on PREPARING replicas
    submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)
    # One RSE has limits set: its requests are moved to WAITING status; the other RSE has no limits: its request goes directly to QUEUED
    preparer(once=True, sleep_time=1, bulk=100, partition_wait_time=0, ignore_availability=False)
    request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did1)
    assert request['state'] == RequestState.WAITING
    request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did2)
    assert request['state'] == RequestState.WAITING
    request = request_core.get_request_by_did(rse_id=dst_rse_id2, **did1)
    assert request['state'] == RequestState.QUEUED
    # submitter must not work on WAITING replicas
    submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)
    # One of the waiting requests will be queued; the second will remain in the WAITING state
    throttler(once=True, partition_wait_time=0)
    # Check metrics.
    # These gauge values are recorded at the beginning of the execution. Hence 2 waiting and 0 active transfers
    gauge_name = 'rucio_daemons_conveyor_throttler_rse_transfer_limits'
    assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'residual_capacity'}) == 1
    assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'max_transfers'}) == 1
    assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'active'}) == 0
    assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'waiting'}) == 2
    request1 = request_core.get_request_by_did(rse_id=dst_rse_id1, **did1)
    request2 = request_core.get_request_by_did(rse_id=dst_rse_id1, **did2)
    # one request is WAITING and the other QUEUED
    assert (request1['state'] == RequestState.WAITING and request2['state'] == RequestState.QUEUED
            or request1['state'] == RequestState.QUEUED and request2['state'] == RequestState.WAITING)
    waiting_did = did1 if request1['state'] == RequestState.WAITING else did2
    queued_did = did1 if request1['state'] == RequestState.QUEUED else did2
    submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)
    # Calling the throttler again will not schedule the waiting request, because there is a submitted one
    throttler(once=True, partition_wait_time=0)
    # These gauge values are recorded at the beginning of the execution. Hence 1 waiting and 1 active transfer
    assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'residual_capacity'}) == 0
    assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'max_transfers'}) == 1
    assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'active'}) == 1
    assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'waiting'}) == 1
    request = request_core.get_request_by_did(rse_id=dst_rse_id1, **waiting_did)
    assert request['state'] == RequestState.WAITING
    request = __wait_for_state_transition(dst_rse_id=dst_rse_id1, **queued_did)
    assert request['state'] == RequestState.DONE
    request = __wait_for_state_transition(dst_rse_id=dst_rse_id2, **did1)
    assert request['state'] == RequestState.DONE
    # Now that the submitted transfers are finished, the WAITING one can be queued
    throttler(once=True, partition_wait_time=0)
    request = request_core.get_request_by_did(rse_id=dst_rse_id1, **waiting_did)
    assert request['state'] == RequestState.QUEUED | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def before_test(self, func, *args, **kwargs):\n pass",
"def test(self):\n return test_throttle_method()",
"def testJobTRSetRandomRun(databases):\n\n class CustomGenerator(DataGenerator):\n chanceChainProduct = 0.4\n numTaskRunners = 5\n chanceTRFramework = 0.7\n chanceTRAllowedForJob = 0.7\n chanceTRAllowedForTask = 0.5\n chanceTRSetOverride = 0.4\n\n def frameworksForTaskRunner(self):\n return [\n framework for framework in self.frameworks\n if self.rnd.random() < self.chanceTRFramework\n ]\n\n def createConfiguration(self):\n def randomTRSet(chance):\n return (\n tr for tr in self.taskRunners\n if self.rnd.random() < chance\n )\n config = DataGenerator.createConfiguration(self)\n config._setRunners(randomTRSet(self.chanceTRAllowedForJob))\n for task in config.getTasks():\n if self.rnd.random() < self.chanceTRSetOverride:\n task._setRunners(\n randomTRSet(self.chanceTRAllowedForTask)\n )\n config._notify()\n return config\n\n def checkResults(gen, job):\n\n def checkExecutionFinishedTask(task):\n assert task.isDone()\n taskRunners = task.getRunners() or job.getRunners()\n runnerId = task['runner']\n if taskRunners:\n assert runnerId in taskRunners\n trCaps = databases.resourceDB[runnerId].capabilities\n for cap in task.getNeededCaps():\n assert cap in trCaps\n\n def allInputsReady(task):\n for input in task.getInputs():\n if not job.getProduct(input).isAvailable():\n return False\n return True\n\n def checkTaskRunners(task, onlyThis = None):\n if onlyThis is not None:\n taskRunners = [onlyThis]\n else:\n taskRunners = task.getRunners() or job.getRunners()\n for runnerId in taskRunners:\n # Target is not checked here, because DataGenerator uses\n # the same target for the job and all the task runners.\n assert not databases.resourceDB[runnerId].capabilities \\\n >= task.getNeededCaps()\n\n def checkNotDone(tasksNotDone, noTasksDone, runnerId):\n #assert noTasksDone\n if runnerId is None:\n assert noTasksDone\n else:\n assert len(tasksNotDone) != 0\n for task in tasksNotDone:\n if allInputsReady(task):\n assert runnerId not in \\\n (task.getRunners() or job.getRunners())\n\n for item in job.getTaskGroupSequence():\n if isinstance(item, TaskGroup):\n runnerId = item.getRunnerId()\n neededCaps = item.getNeededCaps()\n noTasksDone = True\n tasksNotDone = []\n taskRunners = None\n for task in item.getChildren():\n runners = task.getRunners() or job.getRunners()\n if runners:\n if taskRunners is None:\n taskRunners = set(runners)\n else:\n taskRunners &= runners\n if task.isExecutionFinished():\n checkExecutionFinishedTask(task)\n assert task['runner'] == runnerId\n noTasksDone = False\n else:\n tasksNotDone.append(task)\n if taskRunners is None:\n assert len(tasksNotDone) == 0\n elif taskRunners:\n if runnerId in taskRunners:\n for task in tasksNotDone:\n if allInputsReady(task):\n checkTaskRunners(task, runnerId)\n else:\n checkNotDone(tasksNotDone, noTasksDone, runnerId)\n else:\n checkNotDone(tasksNotDone, noTasksDone, runnerId)\n else:\n task = item # item is a task\n if task.isExecutionFinished():\n checkExecutionFinishedTask(task)\n elif allInputsReady(task):\n checkTaskRunners(task)\n\n seed = 123456789\n rnd = random.Random(seed)\n runs = 10\n randomRuns(databases, runs, rnd, CustomGenerator, checkResults)",
"def trial_prep(self):\n pass",
"def test_process_data(self):\n pass",
"def test_run_started(self):",
"def test_03(self):\n e = Emulator()\n e.init()\n e.make_transfer_prepare_condition()\n\n Emulator.run_transfer_prepare()\n qs = TransferPrepare.objects.filter(is_processed=False)\n assert qs.count() > 0\n\n Emulator.run_transfer_donkies_prepare()\n\n qs = TransferPrepare.objects.filter(is_processed=False)\n assert qs.count() == 0",
"def test_batch(self):\n pass",
"def setUp(self):\n self.t = Task()\n self.t(\"add one mississippi\")\n self.t(\"add two mississippi\")",
"def test_create_run(self):\n pass",
"def test_basic_execution(self):",
"def test_post_chain(self):\n pass",
"def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ):\n self.local_user = models.User.objects.create_user(\n \"[email protected]\",\n \"[email protected]\",\n \"mouseword\",\n local=True,\n localname=\"mouse\",\n remote_id=\"https://example.com/users/mouse\",\n )\n self.work = models.Work.objects.create(title=\"Test Work\")\n self.book = models.Edition.objects.create(\n title=\"Example Edition\",\n remote_id=\"https://example.com/book/1\",\n parent_work=self.work,\n )\n with patch(\"bookwyrm.models.activitypub_mixin.broadcast_task.delay\"):\n self.shelf = models.Shelf.objects.create(\n name=\"Test Shelf\", identifier=\"test-shelf\", user=self.local_user\n )\n models.SiteSettings.objects.create()",
"def setUp(self):\n self.pcp = ControllerQueue(None, None)\n DummyWorkItem.results = {}",
"def test_functionality(self):\n self.templateName = \"Test Template\"\n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()",
"def test_task_preloading(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register()\r\n self.signin()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n task1 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task1.get('info'), task1\r\n # Pre-load the next task for the user\r\n res = self.app.get('api/app/1/newtask?offset=1')\r\n task2 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task2.get('info'), task2\r\n # Check that both tasks are different\r\n assert task1.get('id') != task2.get('id'), \"Tasks should be different\"\r\n ## Save the assigned task\r\n assigned_tasks.append(task1)\r\n assigned_tasks.append(task2)\r\n\r\n # Submit an Answer for the assigned and pre-loaded task\r\n for t in assigned_tasks:\r\n tr = dict(app_id=t['app_id'], task_id=t['id'], info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n # Get two tasks again\r\n res = self.app.get('api/app/1/newtask')\r\n task3 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task3.get('info'), task1\r\n # Pre-load the next task for the user\r\n res = self.app.get('api/app/1/newtask?offset=1')\r\n task4 = json.loads(res.data)\r\n # Check that we received a Task\r\n assert task4.get('info'), task2\r\n # Check that both tasks are different\r\n assert task3.get('id') != task4.get('id'), \"Tasks should be different\"\r\n assert task1.get('id') != task3.get('id'), \"Tasks should be different\"\r\n assert task2.get('id') != task4.get('id'), \"Tasks should be different\"\r\n # Check that a big offset returns None\r\n res = self.app.get('api/app/1/newtask?offset=11')\r\n assert json.loads(res.data) == {}, res.data",
"def test_begin(self):",
"def test_preprocessed_data(self):\n self.assertEqual(self.tester.preprocessed_data, [1, 2])",
"def test_int(self):\n output, _err = self.executor.prepare('do-stuff', 'special', verbosity=5).batch()\n self.assertEqual(output, 'doing stuff very specially')",
"def test_prepare_on_run(self):\n class Mock(object):\n def __init__(self):\n self.t_max = None\n self.dt = None\n\n def evolve(self, t, dt):\n pass\n\n def prepare(self, t_max, dt):\n self.t_max = t_max\n self.dt = dt\n\n t_max = 10.0\n dt = 0.2\n \n G = Mock()\n sim = simulation.Simulation(G, dt=dt)\n self.assertIsNone(G.t_max)\n self.assertIsNone(G.dt)\n\n sim.run(t_max)\n self.assertEqual(G.t_max, t_max)\n self.assertEqual(G.dt, dt)",
"def test_posthardwares(self):\n pass",
"def setUp(self):\n self.t = Task()",
"def setUp(self):\n self.t = Task()",
"def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()",
"def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ), patch(\"bookwyrm.lists_stream.populate_lists_task.delay\"):\n self.local_user = models.User.objects.create_user(\n \"[email protected]\",\n \"[email protected]\",\n \"mouseword\",\n local=True,\n localname=\"mouse\",\n remote_id=\"https://example.com/users/mouse\",\n )\n work = models.Work.objects.create(title=\"Work\")\n self.book = models.Edition.objects.create(\n title=\"Example Edition\",\n remote_id=\"https://example.com/book/1\",\n parent_work=work,\n )\n\n with patch(\n \"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async\"\n ), patch(\"bookwyrm.lists_stream.remove_list_task.delay\"):\n self.list = models.List.objects.create(\n name=\"Test List\", user=self.local_user\n )\n self.anonymous_user = AnonymousUser\n self.anonymous_user.is_authenticated = False\n\n models.SiteSettings.objects.create()",
"def test_integration1(self):\n self._test_integration(1)",
"def startTestHook(self):",
"def test_retry_run(self):\n pass",
"def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ), patch(\"bookwyrm.lists_stream.populate_lists_task.delay\"):\n self.local_user = models.User.objects.create_user(\n \"[email protected]\",\n \"[email protected]\",\n \"mouseword\",\n local=True,\n localname=\"mouse\",\n )\n with patch(\"bookwyrm.models.user.set_remote_server.delay\"):\n self.remote_user = models.User.objects.create_user(\n \"rat\",\n \"[email protected]\",\n \"ratword\",\n remote_id=\"http://example.com/rat\",\n local=False,\n )\n self.book = models.Edition.objects.create(\n title=\"Test Book\",\n parent_work=models.Work.objects.create(title=\"Test work\"),\n )",
"def runtest(self):"
]
| [
"0.59932137",
"0.59894603",
"0.59680325",
"0.5961236",
"0.59596145",
"0.595309",
"0.58936113",
"0.5890901",
"0.5886319",
"0.5860065",
"0.584749",
"0.584278",
"0.58363724",
"0.58184135",
"0.58139724",
"0.5780692",
"0.57532215",
"0.57527906",
"0.5749115",
"0.5746729",
"0.57432675",
"0.5733495",
"0.5733495",
"0.571596",
"0.5714979",
"0.569511",
"0.5685771",
"0.5678612",
"0.56566334",
"0.5654813"
]
| 0.653352 | 0 |
Prepares the XRD RSEs for an overwrite_on_tape test. Fakes that one xroot RSE is a tape destination (and rolls back the change after the test). Returns a factory which allows one to upload/register/add_rule for two dids | def overwrite_on_tape_topology(rse_factory, did_factory, root_account, vo, file_factory):
    rse1 = 'XRD1'
    rse1_id = rse_core.get_rse_id(rse=rse1, vo=vo)
    rse2 = 'XRD3'
    rse2_id = rse_core.get_rse_id(rse=rse2, vo=vo)
    rse3 = 'XRD4'
    rse3_id = rse_core.get_rse_id(rse=rse3, vo=vo)
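    # XRD4 will be the (fake tape) destination; XRD1 -> XRD4 requires a multihop transfer, while XRD3 -> XRD4 is a direct transfer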
    def __generate_and_upload_file(src_rse, dst_rse, simulate_dst_corrupted=False):
        """
        Create and upload real files to the source and destination. Don't register the file on the destination; this way, FTS will fail if overwrite = False.
        If simulate_dst_corrupted is True, upload a different file to the destination to simulate that it is corrupted.
        """
        local_file = file_factory.file_generator()
        did = did_factory.random_file_did()
        did_factory.upload_test_file(src_rse, path=local_file, **did)
        did_factory.upload_client.upload(
            [
                {
                    'path': file_factory.file_generator(size=3) if simulate_dst_corrupted else local_file,
                    'rse': dst_rse,
                    'did_scope': did['scope'].external,
                    'did_name': did['name'],
                    'no_register': True,
                }
            ]
        )
        return did

    def __create_dids(did1_corrupted=True, did2_corrupted=True):
        """
        Uploads two files:
        - one which requires multiple transfer hops to reach the destination
        - one which can be transferred in one hop to the destination rse
        """
        # multihop transfer:
        did1 = __generate_and_upload_file(rse1, rse3, simulate_dst_corrupted=did1_corrupted)
        # direct transfer
        did2 = __generate_and_upload_file(rse2, rse3, simulate_dst_corrupted=did2_corrupted)
        rule_core.add_rule(dids=[did1, did2], account=root_account, copies=1, rse_expression=rse3, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
        return rse1_id, rse2_id, rse3_id, did1, did2

    # Fake that the destination RSE is a tape
    rse_core.update_rse(rse_id=rse3_id, parameters={'rse_type': RSEType.TAPE})
    try:
        rse_core.add_rse_attribute(rse3_id, 'archive_timeout', 60)
        yield __create_dids
    finally:
        rse_core.update_rse(rse_id=rse3_id, parameters={'rse_type': RSEType.DISK})
        rse_core.del_rse_attribute(rse3_id, 'archive_timeout') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_two_multihops_same_intermediate_rse(rse_factory, did_factory, root_account, core_config_mock, caches_mock):\n # +------+ +------+ +------+ +------+ +------+\n # | | | | | | | | | |\n # | RSE1 +--->| RSE2 +--->| RSE3 +--->| RSE4 +--->| RSE5 |\n # | | | | | | | | | |\n # +------+ +------+ +---+--+ +------+ +------+\n # |\n # | +------+ +------+\n # | | | | |\n # +------>| RSE6 +--->| RSE7 |\n # | | | |\n # +------+ +------+\n _, _, reaper_cache_region = caches_mock\n rse1, rse1_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse2, rse2_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse3, rse3_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse4, rse4_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse5, rse5_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse6, rse6_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse7, rse7_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n all_rses = [rse1_id, rse2_id, rse3_id, rse4_id, rse5_id, rse6_id, rse7_id]\n for rse_id in all_rses:\n rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)\n rse_core.set_rse_limits(rse_id=rse_id, name='MinFreeSpace', value=1)\n rse_core.set_rse_usage(rse_id=rse_id, source='storage', used=1, free=0)\n distance_core.add_distance(rse1_id, rse2_id, distance=10)\n distance_core.add_distance(rse2_id, rse3_id, distance=10)\n distance_core.add_distance(rse3_id, rse4_id, distance=10)\n distance_core.add_distance(rse4_id, rse5_id, distance=10)\n distance_core.add_distance(rse3_id, rse6_id, distance=10)\n distance_core.add_distance(rse6_id, rse7_id, distance=10)\n\n did = did_factory.upload_test_file(rse1)\n rule_core.add_rule(dids=[did], account=root_account, copies=2, rse_expression=f'{rse5}|{rse7}', grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n\n class _FTSWrapper(FTSWrapper):\n @staticmethod\n def on_submit(file):\n # Simulate using the mock gfal plugin a transfer failure\n file['sources'] = [set_query_parameters(s_url, {'errno': 2}) for s_url in file['sources']]\n\n # Submit the first time, but force a failure to verify that retries are correctly handled\n with patch('rucio.core.transfer.TRANSFERTOOL_CLASSES_BY_NAME', new={'fts3': _FTSWrapper}):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=rse2_id, **did)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit the transfer without simulating a failure. Everything should go as normal starting now.\n for _ in range(4):\n # for multihop, finisher works one hop at a time. 
4 is the maximum number of hops in this test graph\n finisher(once=True, partition_wait_time=0)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # one request must be submitted, but the second will only be queued\n if request_core.get_request_by_did(rse_id=rse5_id, **did)['state'] == RequestState.QUEUED:\n rse_id_second_to_last_queued, rse_id_queued = rse4_id, rse5_id\n rse_id_second_to_last_submit, rse_id_submitted = rse6_id, rse7_id\n else:\n rse_id_second_to_last_queued, rse_id_queued = rse6_id, rse7_id\n rse_id_second_to_last_submit, rse_id_submitted = rse4_id, rse5_id\n request = request_core.get_request_by_did(rse_id=rse_id_queued, **did)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse_id_submitted, **did)\n assert request['state'] == RequestState.SUBMITTED\n\n # Calling submitter again will not unblock the queued requests\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=rse_id_submitted, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n request = request_core.get_request_by_did(rse_id=rse_id_queued, **did)\n assert request['state'] == RequestState.QUEUED\n\n # Once the submitted transfer is done, the submission will continue for second request (one hop at a time)\n # First of the remaining two hops submitted\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=rse_id_second_to_last_queued, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n # One of the intermediate replicas is eligible for deletion. Others are blocked by entries in source table\n reaper_cache_region.invalidate()\n reaper(once=True, rses=[], include_rses='|'.join([rse2, rse3, rse4, rse6]), exclude_rses=None)\n with pytest.raises(ReplicaNotFound):\n replica_core.get_replica(rse_id=rse_id_second_to_last_submit, **did)\n for rse_id in [rse2_id, rse3_id, rse_id_second_to_last_queued]:\n replica_core.get_replica(rse_id=rse_id, **did)\n\n # Final hop\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=rse_id_queued, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n # All intermediate replicas can be deleted\n reaper_cache_region.invalidate()\n reaper(once=True, rses=[], include_rses='|'.join([rse2, rse3, rse4, rse6]), exclude_rses=None)\n for rse_id in [rse2_id, rse3_id, rse4_id, rse6_id]:\n with pytest.raises(ReplicaNotFound):\n replica_core.get_replica(rse_id=rse_id, **did)",
"def test_stager(rse_factory, did_factory, root_account, replica_client):\n src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default', rse_type=RSEType.TAPE)\n dst_rse, dst_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n all_rses = [src_rse_id, dst_rse_id]\n\n distance_core.add_distance(src_rse_id, dst_rse_id, distance=10)\n rse_core.add_rse_attribute(src_rse_id, 'staging_buffer', dst_rse)\n for rse_id in all_rses:\n rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)\n\n did = did_factory.upload_test_file(src_rse)\n replica = replica_core.get_replica(rse_id=src_rse_id, **did)\n\n replica_client.add_replicas(rse=dst_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'state': 'C',\n 'bytes': replica['bytes'], 'adler32': replica['adler32'], 'md5': replica['md5']}])\n request_core.queue_requests(requests=[{'dest_rse_id': dst_rse_id,\n 'scope': did['scope'],\n 'name': did['name'],\n 'rule_id': '00000000000000000000000000000000',\n 'attributes': {\n 'source_replica_expression': src_rse,\n 'activity': 'Some Activity',\n 'bytes': replica['bytes'],\n 'adler32': replica['adler32'],\n 'md5': replica['md5'],\n },\n 'request_type': RequestType.STAGEIN,\n 'retry_count': 0,\n 'account': root_account,\n 'requested_at': datetime.utcnow()}])\n stager(once=True, rses=[{'id': rse_id} for rse_id in all_rses])\n\n replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, max_wait_seconds=2 * MAX_POLL_WAIT_SECONDS, **did)\n assert replica['state'] == ReplicaState.AVAILABLE",
"def test_overwrite_hops(overwrite_on_tape_topology, caches_mock, did_factory, file_factory):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=False, did2_corrupted=True)\n did_factory.upload_client.upload(\n [\n {\n 'path': file_factory.file_generator(size=3),\n 'rse': rse_core.get_rse_name(rse2_id),\n 'did_scope': did1['scope'].external,\n 'did_name': did1['name'],\n 'no_register': True,\n }\n ]\n )\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n fts_schema_version = FTS3Transfertool(external_host=TEST_FTS_HOST).version()['schema']['major']\n if fts_schema_version >= 8:\n # Newer fts version will honor the overwrite_hop\n request = __wait_for_state_transition(dst_rse_id=rse2_id, **did1)\n assert request['state'] == RequestState.DONE\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']\n else:\n # FTS only recently introduced the overwrite_hops parameter. It will be ignored on old\n # fts versions and the first hop will fail with the file exists error\n # TODO: remove this else after FTS 3.12 release and after updating rucio/fts container with the new release\n request = __wait_for_state_transition(dst_rse_id=rse2_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Unused hop in multi-hop' in request['err_msg']",
"def test_multihop_intermediate_replica_lifecycle(vo, did_factory, root_account, core_config_mock, caches_mock, metrics_mock):\n src_rse1_name = 'XRD1'\n src_rse1_id = rse_core.get_rse_id(rse=src_rse1_name, vo=vo)\n src_rse2_name = 'XRD2'\n src_rse2_id = rse_core.get_rse_id(rse=src_rse2_name, vo=vo)\n jump_rse_name = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse_name, vo=vo)\n dst_rse_name = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse_name, vo=vo)\n\n all_rses = [src_rse1_id, src_rse2_id, jump_rse_id, dst_rse_id]\n did = did_factory.upload_test_file(src_rse1_name)\n\n # Copy replica to a second source. To avoid the special case of having a unique last replica, which could be handled in a special (more careful) way\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=src_rse2_name, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=src_rse2_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n rse_core.set_rse_limits(rse_id=jump_rse_id, name='MinFreeSpace', value=1)\n rse_core.set_rse_usage(rse_id=jump_rse_id, source='storage', used=1, free=0)\n try:\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse_name, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n\n # Submit transfers to FTS\n # Ensure a replica was created on the intermediary host with epoch tombstone\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n assert request['state'] == RequestState.SUBMITTED\n replica = replica_core.get_replica(rse_id=jump_rse_id, **did)\n assert replica['tombstone'] == datetime(year=1970, month=1, day=1)\n assert replica['state'] == ReplicaState.COPYING\n\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # Fake an existing unused source with raking of 0 for the second source.\n # The ranking of this source should remain at 0 till the end.\n\n @transactional_session\n def __fake_source_ranking(*, session=None):\n models.Source(request_id=request['id'],\n scope=request['scope'],\n name=request['name'],\n rse_id=src_rse2_id,\n dest_rse_id=request['dest_rse_id'],\n ranking=0,\n bytes=request['bytes'],\n url=None,\n is_using=False). 
\\\n save(session=session, flush=False)\n\n __fake_source_ranking()\n\n # The intermediate replica is protected by its state (Copying)\n rucio.daemons.reaper.reaper.REGION.invalidate()\n reaper(once=True, rses=[], include_rses=jump_rse_name, exclude_rses=None)\n replica = replica_core.get_replica(rse_id=jump_rse_id, **did)\n assert replica['state'] == ReplicaState.COPYING\n\n # Wait for the intermediate replica to become ready\n replica = __wait_for_replica_transfer(dst_rse_id=jump_rse_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n # ensure tha the ranking was correct for all sources and intermediate rses\n assert __get_source(request_id=request['id'], src_rse_id=src_rse1_id, **did).ranking == 0\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == 0\n assert __get_source(request_id=request['id'], src_rse_id=src_rse2_id, **did).ranking == 0\n # Only group_bulk=1 part of the path was submitted.\n # run submitter again to copy from jump rse to destination rse\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n # Wait for the destination replica to become ready\n replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n rucio.daemons.reaper.reaper.REGION.invalidate()\n reaper(once=True, rses=[], include_rses='test_container_xrd=True', exclude_rses=None)\n\n with pytest.raises(ReplicaNotFound):\n replica_core.get_replica(rse_id=jump_rse_id, **did)\n\n # 3 request: copy to second source + 2 hops (each separately)\n # Use inequalities, because there can be left-overs from other tests\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 3\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_common_submit_transfer_total') >= 3\n # at least the failed hop\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_finisher_handle_requests_total') > 0\n finally:\n\n @transactional_session\n def _cleanup_all_usage_and_limits(rse_id, *, session=None):\n session.query(models.RSELimit).filter_by(rse_id=rse_id).delete()\n session.query(models.RSEUsage).filter_by(rse_id=rse_id, source='storage').delete()\n\n _cleanup_all_usage_and_limits(rse_id=jump_rse_id)",
"def test_preparer_ignore_availability(rse_factory, did_factory, root_account, file_config_mock):\n\n def __setup_test():\n src_rse, src_rse_id = rse_factory.make_posix_rse()\n dst_rse, dst_rse_id = rse_factory.make_posix_rse()\n\n distance_core.add_distance(src_rse_id, dst_rse_id, distance=10)\n for rse_id in [src_rse_id, dst_rse_id]:\n rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)\n did = did_factory.upload_test_file(src_rse)\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n\n rse_core.update_rse(src_rse_id, {'availability_read': False})\n\n return src_rse_id, dst_rse_id, did\n\n src_rse_id, dst_rse_id, did = __setup_test()\n preparer(once=True, sleep_time=1, bulk=100, partition_wait_time=0, ignore_availability=False)\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n assert request['state'] == RequestState.NO_SOURCES\n\n src_rse_id, dst_rse_id, did = __setup_test()\n preparer(once=True, sleep_time=1, bulk=100, partition_wait_time=0, ignore_availability=True)\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n assert request['state'] == RequestState.QUEUED",
"def test_preparer_throttler_submitter(rse_factory, did_factory, root_account, file_config_mock, core_config_mock, metrics_mock):\n src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n dst_rse1, dst_rse_id1 = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n dst_rse2, dst_rse_id2 = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n all_rses = [src_rse_id, dst_rse_id1, dst_rse_id2]\n\n for rse_id in all_rses:\n rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)\n distance_core.add_distance(src_rse_id, dst_rse_id1, distance=10)\n distance_core.add_distance(src_rse_id, dst_rse_id2, distance=10)\n # Set limits only for one of the RSEs\n request_core.set_transfer_limit(dst_rse1, max_transfers=1, activity='all_activities', strategy='fifo')\n\n did1 = did_factory.upload_test_file(src_rse)\n did2 = did_factory.upload_test_file(src_rse)\n rule_core.add_rule(dids=[did1], account=root_account, copies=1, rse_expression=dst_rse1, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n rule_core.add_rule(dids=[did2], account=root_account, copies=1, rse_expression=dst_rse1, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n rule_core.add_rule(dids=[did1], account=root_account, copies=1, rse_expression=dst_rse2, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n\n request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did1)\n assert request['state'] == RequestState.PREPARING\n request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did2)\n assert request['state'] == RequestState.PREPARING\n request = request_core.get_request_by_did(rse_id=dst_rse_id2, **did1)\n assert request['state'] == RequestState.PREPARING\n\n # submitter must not work on PREPARING replicas\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n # One RSE has limits set: the requests will be moved to WAITING status; the other RSE has no limits: go directly to queued\n preparer(once=True, sleep_time=1, bulk=100, partition_wait_time=0, ignore_availability=False)\n request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did1)\n assert request['state'] == RequestState.WAITING\n request = request_core.get_request_by_did(rse_id=dst_rse_id1, **did2)\n assert request['state'] == RequestState.WAITING\n request = request_core.get_request_by_did(rse_id=dst_rse_id2, **did1)\n assert request['state'] == RequestState.QUEUED\n\n # submitter must not work on WAITING replicas\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n # One of the waiting requests will be queued, the second will remain in waiting state\n throttler(once=True, partition_wait_time=0)\n # Check metrics.\n # This gauge values are recorded at the beginning of the execution. 
Hence 2 waiting and 0 transfers\n gauge_name = 'rucio_daemons_conveyor_throttler_rse_transfer_limits'\n assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'residual_capacity'}) == 1\n assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'max_transfers'}) == 1\n assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'active'}) == 0\n assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'waiting'}) == 2\n request1 = request_core.get_request_by_did(rse_id=dst_rse_id1, **did1)\n request2 = request_core.get_request_by_did(rse_id=dst_rse_id1, **did2)\n # one request WAITING and other QUEUED\n assert (request1['state'] == RequestState.WAITING and request2['state'] == RequestState.QUEUED\n or request1['state'] == RequestState.QUEUED and request2['state'] == RequestState.WAITING)\n waiting_did = did1 if request1['state'] == RequestState.WAITING else did2\n queued_did = did1 if request1['state'] == RequestState.QUEUED else did2\n\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n # Calling the throttler again will not schedule the waiting request, because there is a submitted one\n throttler(once=True, partition_wait_time=0)\n # This gauge values are recorded at the beginning of the execution. Hence 1 waiting and one transfer\n assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'residual_capacity'}) == 0\n assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'max_transfers'}) == 1\n assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'active'}) == 1\n assert metrics_mock.get_sample_value(gauge_name, labels={'activity': 'all_activities', 'rse': dst_rse1, 'limit_attr': 'waiting'}) == 1\n request = request_core.get_request_by_did(rse_id=dst_rse_id1, **waiting_did)\n assert request['state'] == RequestState.WAITING\n\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id1, **queued_did)\n assert request['state'] == RequestState.DONE\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id2, **did1)\n assert request['state'] == RequestState.DONE\n\n # Now that the submitted transfers are finished, the WAITING one can be queued\n throttler(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=dst_rse_id1, **waiting_did)\n assert request['state'] == RequestState.QUEUED",
"def test_overwrite_on_tape(overwrite_on_tape_topology, caches_mock):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=False, did2_corrupted=True)\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']",
"def rse_factory_unittest(request, vo, class_scope_prefix):\n from .temp_factories import TemporaryRSEFactory\n\n with TemporaryRSEFactory(vo=vo, name_prefix=class_scope_prefix) as factory:\n request.cls.rse_factory = factory\n yield factory",
"def test_overwrite_corrupted_files(overwrite_on_tape_topology, core_config_mock, caches_mock):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=True, did2_corrupted=True)\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n class _FTSWrapper(FTSWrapper):\n @staticmethod\n def on_receive(job_params):\n for job in (job_params if isinstance(job_params, list) else [job_params]):\n for file in job.get('files', []):\n if (file.get('file_metadata', {}).get('dst_type') == 'TAPE'\n and file.get('file_metadata', {}).get('dst_file', {}).get('file_on_tape') is not None):\n # Fake that dst_file metadata contains file_on_tape == True\n # As we don't really have tape RSEs in our tests, file_on_tape is always false\n file['file_metadata']['dst_file']['file_on_tape'] = True\n return job_params\n\n with patch('rucio.daemons.conveyor.poller.FTS3Transfertool', _FTSWrapper):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Both transfers must be marked as failed because the file size is incorrect\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit the failed requests. They must fail again, because overwrite_corrupted_files is False\n # 2 runs: for multihop, finisher works one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Set overwrite to True before running the poller or finisher\n core_config.set('transfers', 'overwrite_corrupted_files', True)\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit one more time. Now the destination file must be overwritten\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'",
"def preservation_derivatives(scope=\"package\"):\n app = create_app(\"test\")\n with app.app_context():\n db.create_all()\n\n storage_service = test_helpers.create_test_storage_service(\n name=STORAGE_SERVICE_NAME\n )\n storage_location = test_helpers.create_test_storage_location(\n storage_service_id=storage_service.id\n )\n _ = test_helpers.create_test_pipeline(storage_service_id=storage_service.id)\n fetch_job = test_helpers.create_test_fetch_job(\n storage_service_id=storage_service.id\n )\n\n aip1 = test_helpers.create_test_aip(\n uuid=AIP_1_UUID,\n transfer_name=AIP_1_NAME,\n storage_service_id=storage_service.id,\n storage_location_id=storage_location.id,\n fetch_job_id=fetch_job.id,\n )\n aip2 = test_helpers.create_test_aip(\n uuid=AIP_2_UUID,\n transfer_name=AIP_2_NAME,\n storage_service_id=storage_service.id,\n storage_location_id=storage_location.id,\n fetch_job_id=fetch_job.id,\n )\n\n original_file1 = test_helpers.create_test_file(\n file_type=FileType.original,\n name=ORIGINAL_FILE_1_NAME,\n uuid=ORIGINAL_FILE_1_UUID,\n size=ORIGINAL_FILE_SIZE,\n puid=JPEG_1_01_PUID,\n file_format=JPEG_FILE_FORMAT,\n format_version=JPEG_1_01_FORMAT_VERSION,\n aip_id=aip1.id,\n )\n original_file2 = test_helpers.create_test_file(\n file_type=FileType.original,\n name=ORIGINAL_FILE_2_NAME,\n uuid=ORIGINAL_FILE_2_UUID,\n size=ORIGINAL_FILE_SIZE,\n puid=JPEG_1_02_PUID,\n file_format=JPEG_FILE_FORMAT,\n format_version=JPEG_1_02_FORMAT_VERSION,\n aip_id=aip2.id,\n )\n\n _ = test_helpers.create_test_file(\n file_type=FileType.preservation,\n name=PRESERVATION_FILE_1_NAME,\n uuid=PRESERVATION_FILE_1_UUID,\n size=PRESERVATION_FILE_SIZE,\n puid=TIFF_PUID,\n file_format=TIFF_FILE_FORMAT,\n original_file_id=original_file1.id,\n aip_id=aip1.id,\n )\n _ = test_helpers.create_test_file(\n file_type=FileType.preservation,\n name=PRESERVATION_FILE_2_NAME,\n uuid=PRESERVATION_FILE_2_UUID,\n size=PRESERVATION_FILE_SIZE,\n puid=TIFF_PUID,\n file_format=TIFF_FILE_FORMAT,\n original_file_id=original_file2.id,\n aip_id=aip2.id,\n )\n\n yield app\n\n db.drop_all()",
"def test_multi_vo_certificates(file_config_mock, rse_factory, did_factory, scope_factory, vo, second_vo):\n\n _, [scope1, scope2] = scope_factory(vos=[vo, second_vo])\n\n def __init_test_for_vo(vo, scope):\n src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default', vo=vo)\n dst_rse, dst_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default', vo=vo)\n all_rses = [src_rse_id, dst_rse_id]\n\n for rse_id in all_rses:\n rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)\n distance_core.add_distance(src_rse_id, dst_rse_id, distance=10)\n account = InternalAccount('root', vo=vo)\n did = did_factory.random_file_did(scope=scope)\n replica_core.add_replica(rse_id=src_rse_id, scope=scope, name=did['name'], bytes_=1, account=account, adler32=None, md5=None)\n rule_core.add_rule(dids=[did], account=account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None,\n lifetime=None, locked=False, subscription_id=None, ignore_account_limit=True)\n return all_rses\n\n all_rses = []\n rses = __init_test_for_vo(vo=vo, scope=scope1)\n all_rses.extend(rses)\n rses = __init_test_for_vo(vo=second_vo, scope=scope2)\n all_rses.extend(rses)\n\n certs_used_by_submitter = []\n certs_used_by_poller = []\n\n class _FTSWrapper(FTS3Transfertool):\n # Override fts3 transfertool. Don't actually perform any interaction with fts; and record the certificates used\n def submit(self, transfers, job_params, timeout=None):\n certs_used_by_submitter.append(self.cert[0])\n return generate_uuid()\n\n def bulk_query(self, requests_by_eid, timeout=None):\n certs_used_by_poller.append(self.cert[0])\n return {}\n\n with patch('rucio.core.transfer.TRANSFERTOOL_CLASSES_BY_NAME', new={'fts3': _FTSWrapper}):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n assert sorted(certs_used_by_submitter) == ['DEFAULT_DUMMY_CERT', 'NEW_VO_DUMMY_CERT']\n\n with patch('rucio.daemons.conveyor.poller.FTS3Transfertool', _FTSWrapper):\n poller(once=True, older_than=0, partition_wait_time=0)\n assert sorted(certs_used_by_poller) == ['DEFAULT_DUMMY_CERT', 'NEW_VO_DUMMY_CERT']",
"def create_deformers_backups(source, target, shape_orig, deformers):\n\n # declare return values\n bs_nodes = []\n skin_nodes = []\n cluster_nodes = []\n\n # creates blendshapes nodes backup\n if len(deformers[\"blendShape\"]):\n bs_nodes = create_blendshapes_backup(target, source,\n deformers[\"blendShape\"])\n\n # creates skincluster nodes backup\n if len(deformers[\"skinCluster\"]):\n skin_nodes = create_skincluster_backup(shape_orig,\n deformers[\"skinCluster\"][0])\n # creates clusters nodes backup\n if len(deformers[\"cluster\"]):\n cluster_nodes = create_clusters_backup(target, deformers[\"cluster\"])\n\n return bs_nodes, skin_nodes, cluster_nodes",
"def setup_experiment(testruns, droplist=\"\"):\n ex = Experiment()\n ex.addSoluFile(ALL_SOLU)\n\n regexlist = []\n for x in droplist.split(\",\"):\n # defaultvalue, if empty we don't want to exclude everything\n if x == \"\":\n continue\n try:\n y = re.compile(x)\n regexlist.append(y)\n except:\n pass\n\n excluded_inst = []\n # get data\n for t in testruns:\n # update representation\n additional_data = {\"RubberbandId\": get_rbid_representation(t, \"extended\")}\n\n # collect data and pass to ipet\n ipettestrun = TestRun()\n tr_raw_data = t.get_data(add_data=additional_data)\n\n tr_data = {}\n for i in tr_raw_data.keys():\n for r in regexlist:\n if r.match(i):\n excluded_inst.append(i)\n break\n else:\n tr_data[i] = tr_raw_data[i]\n\n ipettestrun.data = pd.DataFrame(tr_data).T\n\n ex.testruns.append(ipettestrun)\n return ex, excluded_inst",
"def test_cancel_rule(rse_factory, did_factory, root_account):\n src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n dst_rse, dst_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n all_rses = [src_rse_id, dst_rse_id]\n\n distance_core.add_distance(src_rse_id, dst_rse_id, distance=10)\n for rse_id in all_rses:\n rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)\n\n did = did_factory.upload_test_file(src_rse)\n\n [rule_id] = rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n\n class _FTSWrapper(FTSWrapper):\n @staticmethod\n def on_submit(file):\n # Simulate using the mock gfal plugin that it takes a long time to copy the file\n file['sources'] = [set_query_parameters(s_url, {'time': 30}) for s_url in file['sources']]\n\n with patch('rucio.core.transfer.TRANSFERTOOL_CLASSES_BY_NAME', new={'fts3': _FTSWrapper}):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n\n rule_core.delete_rule(rule_id)\n\n with pytest.raises(RequestNotFound):\n request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n\n fts_response = FTS3Transfertool(external_host=TEST_FTS_HOST).bulk_query({request['external_id']: {request['id']: request}})\n assert fts_response[request['external_id']][request['id']].job_response['job_state'] == 'CANCELED'",
"def test_identity_multiple_tape(self, dev, tmpdir, monkeypatch):\n qml.enable_tape()\n\n dev = qml.device(dev, wires=2, keep_files=False)\n\n with qml.tape.QuantumTape() as tape1:\n qml.RX(0.133, wires=0)\n qml.expval(qml.Identity(wires=[0]))\n\n with qml.tape.QuantumTape() as tape2:\n qml.RX(0.432, wires=0)\n qml.expval(qml.Identity(wires=[0]))\n qml.expval(qml.Identity(wires=[1]))\n\n circuits = [tape1, tape2]\n\n test_uuid = \"1234\"\n with monkeypatch.context() as m:\n m.setattr(pennylane_orquestra.cli_actions, \"user_data_dir\", lambda *args: tmpdir)\n\n # Disable submitting to the Orquestra platform by mocking Popen\n m.setattr(subprocess, \"Popen\", lambda *args, **kwargs: MockPopen())\n m.setattr(\n pennylane_orquestra.orquestra_device,\n \"loop_until_finished\",\n lambda *args, **kwargs: None,\n )\n\n # Disable random uuid generation\n m.setattr(uuid, \"uuid4\", lambda *args: test_uuid)\n\n res = dev.batch_execute(circuits)\n\n # No workflow files were created because we only computed with\n # identities\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}.yaml\"))\n assert not os.path.exists(tmpdir.join(f\"expval-{test_uuid}.yaml\"))\n\n expected = [\n np.ones(1),\n np.ones(2),\n ]\n\n for r, e in zip(res, expected):\n assert np.allclose(r, e)\n\n qml.disable_tape()",
"def test_checksum_validation(rse_factory, did_factory, root_account):\n src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n dst_rse1, dst_rse1_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n dst_rse2, dst_rse2_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n dst_rse3, dst_rse3_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n all_rses = [src_rse_id, dst_rse1_id, dst_rse2_id, dst_rse3_id]\n\n for rse_id in [dst_rse1_id, dst_rse2_id, dst_rse3_id]:\n distance_core.add_distance(src_rse_id, rse_id, distance=10)\n for rse_id in all_rses:\n rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)\n\n rse_core.add_rse_attribute(src_rse_id, 'supported_checksums', 'adler32')\n rse_core.add_rse_attribute(dst_rse1_id, 'verify_checksum', False)\n rse_core.add_rse_attribute(dst_rse2_id, 'supported_checksums', 'md5')\n rse_core.add_rse_attribute(dst_rse3_id, 'supported_checksums', 'md5,adler32')\n\n did = did_factory.upload_test_file(src_rse)\n replica = replica_core.get_replica(rse_id=src_rse_id, **did)\n\n rule_core.add_rule(dids=[did], account=root_account, copies=3, rse_expression=f'{dst_rse1}|{dst_rse2}|{dst_rse3}', grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n\n class _FTSWrapper(FTSWrapper):\n @staticmethod\n def on_submit(file):\n # Set the correct checksum on source and simulate a wrong checksum on destination\n file['sources'] = [set_query_parameters(s_url, {'checksum': replica['adler32']}) for s_url in file['sources']]\n file['destinations'] = [set_query_parameters(d_url, {'checksum': 'randomString2'}) for d_url in file['destinations']]\n\n with patch('rucio.core.transfer.TRANSFERTOOL_CLASSES_BY_NAME', new={'fts3': _FTSWrapper}):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n # Checksum verification disabled on this rse, so the transfer must use source validation and succeed\n request = __wait_for_state_transition(dst_rse_id=dst_rse1_id, **did)\n assert request['state'] == RequestState.DONE\n\n # No common supported checksum between the source and destination rse. It will verify the destination rse checksum and fail\n request = __wait_for_state_transition(dst_rse_id=dst_rse2_id, **did)\n assert request['state'] == RequestState.FAILED\n assert 'User and destination checksums do not match' in request['err_msg']\n\n # Common checksum exists between the two. It must use \"both\" validation strategy and fail\n request = __wait_for_state_transition(dst_rse_id=dst_rse3_id, **did)\n assert 'Source and destination checksums do not match' in request['err_msg']\n assert request['state'] == RequestState.FAILED",
"def test_noregen(self, tmpdir, treantclass):\n with tmpdir.as_cwd():\n # 1\n t1 = treantclass('newone')\n t2 = treantclass('newone', new=True)\n assert t1.uuid != t2.uuid\n\n with pytest.raises(dtr.treants.MultipleTreantsError):\n t3 = treantclass('newone')",
"def setUp(self):\n\n # This test suite needs actual depots.\n pkg5unittest.ManyDepotTestCase.setUp(self, [\"test1\", \"test1\",\n \"test2\", \"test2\"], start_depots=True)\n\n self.make_misc_files(self.misc_files)\n\n self.dpath1 = self.dcs[1].get_repodir()\n self.durl1 = self.dcs[1].get_depot_url()\n self.published = self.pkgsend_bulk(self.durl1, (self.amber10,\n self.amber20, self.bronze10, self.bronze20))\n\n # Purposefully republish bronze20 a second later so a version\n # exists that only differs in timestamp. Also publish tree\n # and scheme after that.\n time.sleep(1)\n self.published.extend(self.pkgsend_bulk(self.durl1,\n (self.bronze20, self.tree10, self.branch10, self.leaf10,\n self.scheme10)))\n\n self.dpath2 = self.dcs[2].get_repodir()\n self.durl2 = self.dcs[2].get_depot_url()\n self.tempdir = tempfile.mkdtemp(dir=self.test_root)\n\n self.durl3 = self.dcs[3].get_depot_url()\n self.durl4 = self.dcs[4].get_depot_url()",
"def setUp(self):\n PatientIDSettings.objects.create()\n User.objects.create_user('temporary', '[email protected]', 'temporary')\n\n dx1 = \"test_files/DX-Im-Carestream_DR7500-1.dcm\"\n dx2 = \"test_files/DX-Im-Carestream_DR7500-2.dcm\"\n dx3 = \"test_files/DX-Im-Carestream_DRX.dcm\"\n dx4 = \"test_files/DX-Im-GE_XR220-1.dcm\"\n dx5 = \"test_files/DX-Im-GE_XR220-2.dcm\"\n dx6 = \"test_files/DX-Im-GE_XR220-3.dcm\"\n dx7 = \"test_files/DX-RDSR-Canon_CXDI.dcm\"\n dx8 = \"test_files/DX-RDSR-Carestream_DRXEvolution.dcm\"\n root_tests = os.path.dirname(os.path.abspath(__file__))\n path_dx1 = os.path.join(root_tests, dx1)\n path_dx2 = os.path.join(root_tests, dx2)\n path_dx3 = os.path.join(root_tests, dx3)\n path_dx4 = os.path.join(root_tests, dx4)\n path_dx5 = os.path.join(root_tests, dx5)\n path_dx6 = os.path.join(root_tests, dx6)\n path_dx7 = os.path.join(root_tests, dx7)\n path_dx8 = os.path.join(root_tests, dx8)\n\n dx.dx(path_dx1)\n dx.dx(path_dx2)\n dx.dx(path_dx3)\n dx.dx(path_dx4)\n dx.dx(path_dx5)\n dx.dx(path_dx6)\n rdsr.rdsr(path_dx7)\n rdsr.rdsr(path_dx8)",
"def setup_target_redshift(self):\n self.run_query_target_redshift(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres CASCADE'\n )\n self.run_query_target_redshift(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres_public2 CASCADE'\n )\n self.run_query_target_redshift(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres_logical1 CASCADE'\n )\n self.run_query_target_redshift(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_postgres_logical2 CASCADE'\n )\n self.run_query_target_redshift(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_mysql CASCADE'\n )\n self.run_query_target_redshift(\n 'DROP SCHEMA IF EXISTS ppw_e2e_tap_s3_csv CASCADE'\n )\n self.run_query_target_redshift('DROP SCHEMA IF EXISTS ppw_e2e_helper CASCADE')\n self.run_query_target_redshift('CREATE SCHEMA ppw_e2e_helper')\n self.run_query_target_redshift(\n 'CREATE TABLE ppw_e2e_helper.dual (dummy VARCHAR)'\n )\n self.run_query_target_redshift('INSERT INTO ppw_e2e_helper.dual VALUES (\\'X\\')')\n\n # Clean config directory\n shutil.rmtree(os.path.join(CONFIG_DIR, 'redshift'), ignore_errors=True)",
"def __generate_and_upload_file(src_rse, dst_rse, simulate_dst_corrupted=False):\n local_file = file_factory.file_generator()\n did = did_factory.random_file_did()\n did_factory.upload_test_file(src_rse, path=local_file, **did)\n did_factory.upload_client.upload(\n [\n {\n 'path': file_factory.file_generator(size=3) if simulate_dst_corrupted else local_file,\n 'rse': dst_rse,\n 'did_scope': did['scope'].external,\n 'did_name': did['name'],\n 'no_register': True,\n }\n ]\n )\n return did",
"def test_non_deterministic_dst(did_factory, did_client, root_account, vo, caches_mock):\n src_rse = 'XRD3'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n all_rses = [src_rse_id, dst_rse_id]\n\n did = did_factory.upload_test_file(src_rse)\n # Dataset name is part of the non-deterministic path\n dataset = did_factory.make_dataset()\n did_client.add_files_to_dataset(files=[{'scope': did['scope'].external, 'name': did['name']}], scope=dataset['scope'].external, name=dataset['name'])\n\n rse_core.update_rse(rse_id=dst_rse_id, parameters={'deterministic': False})\n try:\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n finally:\n rse_core.update_rse(rse_id=dst_rse_id, parameters={'deterministic': True})",
"def test_create_already_prefixed_samples(self):\n pt = npt.assert_warns(QiitaDBWarning, PrepTemplate.create,\n self.metadata_prefixed, self.new_raw_data,\n self.test_study, self.data_type)\n # The returned object has the correct id\n self.assertEqual(pt.id, 2)\n\n # The row in the prep template table has been created\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.prep_template WHERE prep_template_id=2\")\n # prep_template_id, data_type_id, raw_data_id, preprocessing_status,\n # investigation_type\n self.assertEqual(obs, [[2, 2, 5, 'not_preprocessed', None]])\n\n # The relevant rows to common_prep_info have been added.\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.common_prep_info WHERE prep_template_id=2\")\n # prep_template_id, sample_id, study_id, center_name,\n # center_project_name, emp_status_id\n exp = [[2, '1.SKB8.640193', 'ANL', 'Test Project', 1],\n [2, '1.SKD8.640184', 'ANL', 'Test Project', 1],\n [2, '1.SKB7.640196', 'ANL', 'Test Project', 1]]\n self.assertEqual(sorted(obs), sorted(exp))\n\n # The relevant rows have been added to the prep_columns table\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.prep_columns WHERE prep_template_id=2\")\n # prep_template_id, column_name, column_type\n exp = [[2, 'str_column', 'varchar'],\n [2, 'ebi_submission_accession', 'varchar'],\n [2, 'run_prefix', 'varchar'],\n [2, 'barcodesequence', 'varchar'],\n [2, 'linkerprimersequence', 'varchar'],\n [2, 'platform', 'varchar'],\n [2, 'experiment_design_description', 'varchar'],\n [2, 'library_construction_protocol', 'varchar']]\n self.assertEqual(sorted(obs), sorted(exp))\n\n # The new table exists\n self.assertTrue(exists_table(\"prep_2\", self.conn_handler))\n\n # The new table hosts the correct values\n obs = self.conn_handler.execute_fetchall(\n \"SELECT * FROM qiita.prep_2\")\n # sample_id, study_id, str_column, ebi_submission_accession,\n # run_prefix, barcodesequence, linkerprimersequence\n exp = [['1.SKB7.640196', 'Value for sample 3', 'ILLUMINA',\n 's_G1_L002_sequences', 'CCTCTGAGAGCT', None,\n 'GTGCCAGCMGCCGCGGTAA', 'BBBB', 'AAAA'],\n ['1.SKB8.640193', 'Value for sample 1', 'ILLUMINA',\n 's_G1_L001_sequences', 'GTCCGCAAGTTA', None,\n 'GTGCCAGCMGCCGCGGTAA', 'BBBB', 'AAAA'],\n ['1.SKD8.640184', 'Value for sample 2', 'ILLUMINA',\n 's_G1_L001_sequences', 'CGTAGAGCTCTC', None,\n 'GTGCCAGCMGCCGCGGTAA', 'BBBB', 'AAAA']]\n self.assertEqual(sorted(obs), sorted(exp))\n\n # prep and qiime files have been created\n filepaths = pt.get_filepaths()\n self.assertEqual(len(filepaths), 2)\n self.assertEqual(filepaths[0][0], 22)\n self.assertEqual(filepaths[1][0], 21)",
"def test_create_one_aditional_port_then_create_another_one(self):\n qty = 1\n last_ticket = 1223\n self.assertEqual(self.cisco_sp_kadiweu.port_set.count(), 4)\n self.assertEqual(\n self.cisco_sp_kadiweu.port_set.latest('modified').name,\n \"TenGigE0/0/0/4\")\n\n self.cisco_sp_kadiweu.create_additional_ports(qty, last_ticket)\n self.assertEqual(self.cisco_sp_kadiweu.port_set.count(), 4+qty)\n self.assertEqual(\n self.cisco_sp_kadiweu.port_set.latest('modified').name,\n self.cisco_sp_kadiweu.model.switchportrange_set.first(\n ).name_format.format(\n self.cisco_sp_kadiweu.model.switchportrange_set.first(\n ).end+qty))\n\n self.cisco_sp_kadiweu.create_additional_ports(qty, last_ticket)\n self.assertEqual(self.cisco_sp_kadiweu.port_set.count(), 4+qty+qty)\n self.assertEqual(\n self.cisco_sp_kadiweu.port_set.latest('modified').name,\n self.cisco_sp_kadiweu.model.switchportrange_set.first(\n ).name_format.format(\n self.cisco_sp_kadiweu.model.switchportrange_set.first(\n ).end+qty+qty))",
"def setUp(self):\r\n self.full_id = 'edX/full/2012_Fall'\r\n self.toy_id = 'edX/toy/2012_Fall'",
"def _prepare_test_cases(ptfhost, request):\n logger.info(\"Preparing SAI test environment.\")\n _create_sai_test_folders(ptfhost)\n _copy_sai_test_cases(ptfhost, request)",
"def _generate_training_files(self):\r\n tmp_dir = get_qiime_temp_dir()\r\n training_set = RdpTrainingSet()\r\n reference_seqs_file = open(self.Params['reference_sequences_fp'], 'U')\r\n id_to_taxonomy_file = open(self.Params['id_to_taxonomy_fp'], 'U')\r\n\r\n for seq_id, seq in parse_fasta(reference_seqs_file):\r\n training_set.add_sequence(seq_id, seq)\r\n\r\n for line in id_to_taxonomy_file:\r\n seq_id, lineage_str = map(strip, line.split('\\t'))\r\n training_set.add_lineage(seq_id, lineage_str)\r\n\r\n training_set.dereplicate_taxa()\r\n\r\n rdp_taxonomy_file = NamedTemporaryFile(\r\n prefix='RdpTaxonAssigner_taxonomy_', suffix='.txt', dir=tmp_dir)\r\n rdp_taxonomy_file.write(training_set.get_rdp_taxonomy())\r\n rdp_taxonomy_file.seek(0)\r\n\r\n rdp_training_seqs_file = NamedTemporaryFile(\r\n prefix='RdpTaxonAssigner_training_seqs_', suffix='.fasta',\r\n dir=tmp_dir)\r\n for rdp_id, seq in training_set.get_training_seqs():\r\n rdp_training_seqs_file.write('>%s\\n%s\\n' % (rdp_id, seq))\r\n rdp_training_seqs_file.seek(0)\r\n\r\n self._training_set = training_set\r\n\r\n return rdp_taxonomy_file, rdp_training_seqs_file",
"def test_01_init(self):\n\n global primary_instance\n\n # Set up a client directory first.\n uptane.common.create_directory_structure_for_client(\n TEMP_CLIENT_DIR,\n create_primary_pinning_file(),\n {'imagerepo': TEST_IMAGE_REPO_ROOT_FNAME,\n 'director': TEST_DIRECTOR_ROOT_FNAME})\n\n for repository in [\"director\", \"imagerepo\"]:\n \tshutil.copytree(\n \t\tos.path.join(SOURCE_FOR_LOCAL_METADATA,repository), \n \t\tos.path.join(TEMP_CLIENT_DIR,repository))\n\n shutil.copytree(\n \tSOURCE_FOR_LOCAL_TARGETS, \n \tos.path.join(TEMP_CLIENT_DIR,'director','targets'))\n\n\n\n\n\n # TODO: Test with invalid pinning file\n # TODO: Test with pinning file lacking a Director repo.\n\n # Now try creating a Primary with a series of bad arguments, expecting\n # errors.\n\n # TODO: Add test for my_secondaries argument.\n\n # Invalid VIN:\n with self.assertRaises(tuf.FormatError):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=5, # INVALID\n ecu_serial=primary_ecu_serial,\n primary_key=primary_ecu_key,\n time=clock,\n timeserver_public_key=key_timeserver_pub,\n my_secondaries=[])\n\n # Invalid ECU Serial\n with self.assertRaises(tuf.FormatError):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=vin,\n ecu_serial=500, # INVALID\n primary_key=primary_ecu_key,\n time=clock,\n timeserver_public_key=key_timeserver_pub,\n my_secondaries=[])\n\n # Invalid ECU Key\n with self.assertRaises(tuf.FormatError):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=vin,\n ecu_serial=primary_ecu_serial,\n primary_key={''}, # INVALID\n time=clock,\n timeserver_public_key=key_timeserver_pub,\n my_secondaries=[])\n\n # Invalid time:\n with self.assertRaises(tuf.FormatError):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=vin,\n ecu_serial=primary_ecu_serial,\n primary_key=primary_ecu_key,\n time='potato', # INVALID\n timeserver_public_key=key_timeserver_pub,\n my_secondaries=[])\n\n # Invalid format for Director Repository name\n with self.assertRaises(uptane.Error):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=5, #INVALID\n vin=vin,\n ecu_serial=primary_ecu_serial,\n primary_key=primary_ecu_key, time=clock,\n timeserver_public_key = key_timeserver_pub,\n my_secondaries=[])\n\n # Invalid name for Director repository\n with self.assertRaises(uptane.Error):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name= \"invalid\", #INVALID\n vin=vin,\n ecu_serial=primary_ecu_serial,\n primary_key=primary_ecu_key, time=clock,\n timeserver_public_key = key_timeserver_pub,\n my_secondaries=[])\n\n\n # Invalid timeserver key\n with self.assertRaises(tuf.FormatError):\n p = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=vin,\n ecu_serial=primary_ecu_serial,\n primary_key=primary_ecu_key,\n time=clock,\n timeserver_public_key=clock, # INVALID\n my_secondaries=[])\n\n \n\n print(TEMP_CLIENT_DIR)\n\n # Try creating a Primary, expecting it to work.\n # Initializes a Primary ECU, making a client directory and copying the root\n # file from the repositories.\n # Save the result for future tests, to save time and code.\n primary_instance = primary.Primary(\n full_client_dir=TEMP_CLIENT_DIR,\n director_repo_name=demo.DIRECTOR_REPO_NAME,\n vin=vin,\n ecu_serial=primary_ecu_serial,\n 
primary_key=primary_ecu_key,\n time=clock,\n timeserver_public_key=key_timeserver_pub)\n\n\n # Check the fields initialized in the instance to make sure they're correct.\n\n self.assertEqual([], primary_instance.nonces_to_send)\n self.assertEqual([], primary_instance.nonces_sent)\n self.assertEqual(vin, primary_instance.vin)\n self.assertEqual(primary_ecu_serial, primary_instance.ecu_serial)\n self.assertEqual(primary_ecu_key, primary_instance.primary_key)\n self.assertEqual(dict(), primary_instance.ecu_manifests)\n self.assertEqual(\n primary_instance.full_client_dir, TEMP_CLIENT_DIR)\n self.assertIsInstance(primary_instance.updater, tuf.client.updater.Updater)\n tuf.formats.ANYKEY_SCHEMA.check_match(primary_instance.timeserver_public_key)\n self.assertEqual([], primary_instance.my_secondaries)",
"def _init(self, prefix, path_map):\n if self.parent:\n self.root = self.parent.root\n\n self.path = prefix + (self.id or '')\n if self.path in path_map:\n # duplicate test path, resolve it by appending an index,\n\n # first of all, count how many duplicated siblings\n count = 1\n for subtest in self.parent.subtests:\n if subtest == self:\n break\n # '_' will only appear when we try to resolve duplicate path issue,\n # so if the id contains '_', it must be followed by a number.\n if subtest.id.partition('_')[0] == self.id:\n count += 1\n assert count > 1\n # this is the new ID, since FactoryTest constructor will assert ID only\n # contains [a-zA-Z0-9], the new ID must be unique.\n self.id += '_' + str(count)\n self.path = prefix + (self.id or '')\n\n assert self.path not in path_map, 'Duplicate test path %s' % (self.path)\n path_map[self.path] = self\n\n # subtests of a teardown test should be part of teardown as well\n if self.teardown:\n if self.action_on_failure != self.ACTION_ON_FAILURE.NEXT:\n logging.warning(\n '%s: action_on_failure=%s, `action_on_failure` of a teardown test '\n 'must be `NEXT`, the value will be overwritten.', self.path,\n self.action_on_failure)\n self.action_on_failure = self.ACTION_ON_FAILURE.NEXT\n for subtest in self.subtests:\n subtest.SetTeardown()\n\n for subtest in self.subtests:\n subtest.parent = self\n assert self.path, 'self.path should not be empty'\n # pylint: disable=protected-access\n if self.path[-1] == ':':\n subtest._init(self.path, path_map)\n else:\n subtest._init(self.path + '.', path_map)\n\n # next_sibling should point to next test\n for u, v in zip(self.subtests, self.subtests[1:]):\n u.next_sibling = v",
"def test_migrate_interpretation_request_rd(self):\n old_instance = GenericFactoryAvro.get_factory_avro(\n self.old_model.InterpretationRequestRD, VERSION_300, fill_nullables=False\n ).create() # reports_3_0_0.InterpretationRequestRD\n self._validate(old_instance)\n migrated_instance = MigrateReports3To4().migrate_interpretation_request_rd(old_instance=old_instance)\n self._validate(migrated_instance)\n\n old_big_wigs = old_instance.bigWigs\n new_big_wigs = migrated_instance.bigWigs\n\n if old_big_wigs is not None:\n for old_big_wig, new_big_wig in zip(old_big_wigs, new_big_wigs):\n self.assertIsInstance(new_big_wig, self.new_model.File)\n self.assertEqual(new_big_wig.sampleId, old_big_wig.SampleId)\n self.assertEqual(new_big_wig.uriFile, old_big_wig.URIFile)\n self.assertEqual(new_big_wig.fileType, old_big_wig.fileType)\n self.assertEqual(new_big_wig.md5Sum, None)\n\n old_instance = GenericFactoryAvro.get_factory_avro(\n self.old_model.InterpretationRequestRD, VERSION_300, fill_nullables=True\n ).create() # reports_3_0_0.InterpretationRequestRD\n self._validate(old_instance)\n migrated_instance = MigrateReports3To4().migrate_interpretation_request_rd(old_instance=old_instance)\n\n for old_variant, new_variant in zip(old_instance.TieredVariants, migrated_instance.tieredVariants):\n for old_re, new_re in zip(old_variant.reportEvents, new_variant.reportEvents):\n self.assertEqual(old_re.genomicFeature.HGNC, new_re.genomicFeature.hgnc)\n\n self._validate(migrated_instance)"
]
| [
"0.5887489",
"0.5692312",
"0.5588952",
"0.5525843",
"0.55244076",
"0.548657",
"0.51108813",
"0.50846964",
"0.50368524",
"0.4976738",
"0.48961604",
"0.48882294",
"0.48652807",
"0.48602736",
"0.4834495",
"0.48098412",
"0.47892126",
"0.47837698",
"0.47602794",
"0.47538057",
"0.4729472",
"0.47069177",
"0.46868947",
"0.4671167",
"0.46637937",
"0.46635306",
"0.4655428",
"0.4655253",
"0.4654295",
"0.4649258"
]
| 0.67898095 | 0 |
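Several of the snippets above (for example test_overwrite_on_tape and __generate_and_upload_file) rely on the same staging idiom: upload a file to the source RSE normally, place a physical copy on the destination without registering the replica, then assert that the FTS transfer fails because overwrite is not enabled. The following is a minimal sketch of that idiom, not taken from the source; it assumes the pytest fixtures (vo, did_factory, file_factory, root_account) and helpers (rse_core, rule_core, submitter, __wait_for_state_transition, RequestState) visible in the surrounding snippets, and the RSE names are placeholders.

# Hedged sketch only: fixtures, helpers and RSE names are assumed from the
# surrounding snippets, not confirmed by the source.
def test_overwrite_disabled_fails(vo, did_factory, file_factory, root_account):
    src_rse, dst_rse = 'XRD1', 'XRD4'                      # placeholder RSE names
    src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
    dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)

    # Stage: registered replica on the source, unregistered physical copy on the
    # destination (different content to simulate corruption).
    local_file = file_factory.file_generator()
    did = did_factory.random_file_did()
    did_factory.upload_test_file(src_rse, path=local_file, **did)
    did_factory.upload_client.upload([{
        'path': file_factory.file_generator(size=3),
        'rse': dst_rse,
        'did_scope': did['scope'].external,
        'did_name': did['name'],
        'no_register': True,
    }])

    # Trigger the transfer and expect it to fail on the existing destination file.
    rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse,
                       grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
    submitter(once=True, rses=[{'id': rse_id} for rse_id in [src_rse_id, dst_rse_id]],
              group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)
    request = __wait_for_state_transition(dst_rse_id=dst_rse_id, **did)
    assert request['state'] == RequestState.FAILED
    assert 'Destination file exists and overwrite is not enabled' in request['err_msg']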
Create and upload real files to the source and destination RSEs. Don't register the file on the destination; this way, FTS will fail if overwrite = False. If simulate_dst_corrupted is True, upload a different file to the destination to simulate that it is corrupted. | def __generate_and_upload_file(src_rse, dst_rse, simulate_dst_corrupted=False):
local_file = file_factory.file_generator()
did = did_factory.random_file_did()
did_factory.upload_test_file(src_rse, path=local_file, **did)
did_factory.upload_client.upload(
[
{
'path': file_factory.file_generator(size=3) if simulate_dst_corrupted else local_file,
'rse': dst_rse,
'did_scope': did['scope'].external,
'did_name': did['name'],
'no_register': True,
}
]
)
return did | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def overwrite_on_tape_topology(rse_factory, did_factory, root_account, vo, file_factory):\n\n rse1 = 'XRD1'\n rse1_id = rse_core.get_rse_id(rse=rse1, vo=vo)\n rse2 = 'XRD3'\n rse2_id = rse_core.get_rse_id(rse=rse2, vo=vo)\n rse3 = 'XRD4'\n rse3_id = rse_core.get_rse_id(rse=rse3, vo=vo)\n\n def __generate_and_upload_file(src_rse, dst_rse, simulate_dst_corrupted=False):\n \"\"\"\n Create and upload real files to source and destination. Don't register it on destination. This way, fts will fail if overwrite = False\n\n If simulate_dst_corrupted is True, will upload a different file to destination, to simulate that it is corrupted\n \"\"\"\n local_file = file_factory.file_generator()\n did = did_factory.random_file_did()\n did_factory.upload_test_file(src_rse, path=local_file, **did)\n did_factory.upload_client.upload(\n [\n {\n 'path': file_factory.file_generator(size=3) if simulate_dst_corrupted else local_file,\n 'rse': dst_rse,\n 'did_scope': did['scope'].external,\n 'did_name': did['name'],\n 'no_register': True,\n }\n ]\n )\n return did\n\n def __create_dids(did1_corrupted=True, did2_corrupted=True):\n \"\"\"\n Uploads two files:\n - one which requires multiple transfer hop to go to destination\n - one which can be transferred in one hop to destination rse\n \"\"\"\n # multihop transfer:\n did1 = __generate_and_upload_file(rse1, rse3, simulate_dst_corrupted=did1_corrupted)\n # direct transfer\n did2 = __generate_and_upload_file(rse2, rse3, simulate_dst_corrupted=did2_corrupted)\n rule_core.add_rule(dids=[did1, did2], account=root_account, copies=1, rse_expression=rse3, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n\n return rse1_id, rse2_id, rse3_id, did1, did2\n\n # Fake that destination RSE is a tape\n rse_core.update_rse(rse_id=rse3_id, parameters={'rse_type': RSEType.TAPE})\n try:\n rse_core.add_rse_attribute(rse3_id, 'archive_timeout', 60)\n yield __create_dids\n finally:\n rse_core.update_rse(rse_id=rse3_id, parameters={'rse_type': RSEType.DISK})\n rse_core.del_rse_attribute(rse3_id, 'archive_timeout')",
"def test_overwrite_hops(overwrite_on_tape_topology, caches_mock, did_factory, file_factory):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=False, did2_corrupted=True)\n did_factory.upload_client.upload(\n [\n {\n 'path': file_factory.file_generator(size=3),\n 'rse': rse_core.get_rse_name(rse2_id),\n 'did_scope': did1['scope'].external,\n 'did_name': did1['name'],\n 'no_register': True,\n }\n ]\n )\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n fts_schema_version = FTS3Transfertool(external_host=TEST_FTS_HOST).version()['schema']['major']\n if fts_schema_version >= 8:\n # Newer fts version will honor the overwrite_hop\n request = __wait_for_state_transition(dst_rse_id=rse2_id, **did1)\n assert request['state'] == RequestState.DONE\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']\n else:\n # FTS only recently introduced the overwrite_hops parameter. It will be ignored on old\n # fts versions and the first hop will fail with the file exists error\n # TODO: remove this else after FTS 3.12 release and after updating rucio/fts container with the new release\n request = __wait_for_state_transition(dst_rse_id=rse2_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Unused hop in multi-hop' in request['err_msg']",
"def send_file(self, src: PathLike, dest: PathLike, force: bool = False):",
"def _test_upload_dir_contents(self, filenames):\n local_src_dir = self._local_tempdir\n remote_dest_dir = 'remote_dest_dir'\n for filename in filenames:\n self._expected_commands.append('%s cp -a public %s %s' % (\n GSUTIL_LOCATION,\n os.path.join(local_src_dir, filename),\n posixpath.join(remote_dest_dir, filename)))\n with open(os.path.join(local_src_dir, filename), 'w'):\n pass\n gs_utils.upload_dir_contents(\n local_src_dir=local_src_dir, remote_dest_dir=remote_dest_dir,\n gs_acl='public')",
"def test_upload_dir_contents_one_dir(self):\n local_src_dir = self._local_tempdir\n remote_dest_dir = 'remote_dest_dir'\n subdir = 'subdir'\n os.mkdir(os.path.join(local_src_dir, subdir))\n for filename in ['file1', 'file2']:\n self._expected_commands.append('%s cp -a public %s %s' % (\n GSUTIL_LOCATION,\n os.path.join(local_src_dir, subdir, filename),\n posixpath.join(remote_dest_dir, subdir, filename)))\n with open(os.path.join(local_src_dir, subdir, filename), 'w'):\n pass\n gs_utils.upload_dir_contents(\n local_src_dir=local_src_dir, remote_dest_dir=remote_dest_dir,\n gs_acl='public')",
"def putFile(self, _src, _dst, delExisting = True):\n\n #-------------------- \n # Delete existing _dst from XNAT host.\n #-------------------- \n if delExisting:\n r = self.__httpsRequest('DELETE', _dst)\n #print(\"%s Uploading\\nsrc: '%s'\\n_dst: '%s'\"%(_src, _dst))\n\n\n\n #-------------------- \n # Clean '_dst' string and endcode\n #-------------------- \n _dst = Xnat.path.makeXnatUrl(self.host, _dst)\n _dst = str(_dst).encode('ascii', 'ignore')\n\n\n\n #-------------------- \n # Put the file in XNAT using the internal '__httpsRequest'\n # method.\n #-------------------- \n with open(_src, 'rb') as f:\n response = self.__httpsRequest('PUT', _dst, files={'file': f}, \n headers={'Content-Type': 'application/octet-stream'}, stream=True)\n\n return response",
"def upload(self, dest, overwrite=False):\n dest = normpath(dest)\n try:\n remote = get_remote(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n pass\n else: # Something exists here.\n if isinstance(remote, RemoteFile) and self.hash() == remote.hash:\n # Nothing to update.\n pdbox.info(\"%s and %s are identical\" % (self.path, remote.uri))\n return\n if not overwrite:\n raise ValueError(\"%s exists\" % remote.uri)\n\n # Uploading can either happen all at once (with a 150 MB limit),\n # or in chunks. If the file is smaller than the selected chunk size,\n # then try to upload in one go.\n chunksize = min(pdbox._args.get(\"chunksize\", 149.0), 149.0)\n pdbox.debug(\"Chunk size: %.2f MB\" % chunksize)\n if pdbox._args.get(\"dryrun\"):\n pdbox.info(\"Uploaded %s to %s\" % (self.path, dbx_uri(dest)))\n return None\n\n # Set the write mode.\n if overwrite:\n mode = dropbox.files.WriteMode.overwrite\n else:\n mode = dropbox.files.WriteMode.add\n\n chunk = int(chunksize * 1024 * 1024) # Convert B to MB.\n\n with open(self.path, \"rb\") as f:\n data = f.read()\n sz = len(data)\n\n # TODO: Progress bars.\n if sz < chunk: # One-shot upload.\n meta = execute(pdbox.dbx.files_upload, data, dest, mode)\n else: # Multipart upload.\n nchunks = math.ceil(sz / chunk)\n # Initiate the upload with just the first byte.\n start = execute(pdbox.dbx.files_upload_session_start, f[0])\n cursor = dropbox.files.UploadSessionCursor(start.session_id, 1)\n\n # Now just add each chunk.\n while sz - cursor.offset > chunk:\n pdbox.debug(\n \"Uploading chunk %d/%d\" % (cursor.offset % chunk, nchunks),\n )\n execute(\n pdbox.dbx.files_upload_session_append_v2,\n data[cursor.offset:cursor.offset + chunk],\n cursor,\n )\n cursor.offset += chunk\n\n # Upload the remaining to finish the transaction.\n meta = execute(\n pdbox.dbx.files_upload_session_finish,\n data[cursor.offset:],\n dropbox.files.CommitInfo(dest, mode),\n )\n\n pdbox.info(\"Uploaded %s to %s\" % (self.path, dbx_uri(dest)))\n return RemoteFile(None, meta=meta)",
"def upload(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):\n valid = self._sb.file_info(remotefile, quiet = True)\n\n if valid is None:\n remf = remotefile\n else:\n if valid == {}:\n remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]\n else:\n remf = remotefile\n if overwrite == False:\n return {'Success' : False, \n 'LOG' : \"File \"+str(remotefile)+\" exists and overwrite was set to False. Upload was stopped.\"}\n\n try:\n fd = open(localfile, 'rb')\n except OSError as e:\n return {'Success' : False, \n 'LOG' : \"File \"+str(localfile)+\" could not be opened. Error was: \"+str(e)}\n\n fsize = os.path.getsize(localfile)\n\n if fsize > 0:\n code = \"filename _sp_updn '\"+remf+\"' recfm=N permission='\"+permission+\"';\"\n ll = self.submit(code, 'text')\n log1 = ll['LOG']\n\n self.stdin[0].send(str(fsize).encode()+b'tom says EOL=UPLOAD \\n')\n\n while True:\n buf = fd.read1(32768)\n sent = 0\n send = len(buf)\n blen = send\n if blen == 0:\n break\n while send:\n try:\n sent = 0\n sent = self.stdout[0].send(buf[blen-send:blen])\n except (BlockingIOError):\n pass\n send -= sent\n \n code = \"filename _sp_updn;\"\n else:\n log1 = ''\n code = \"\"\"\n filename _sp_updn '\"\"\"+remf+\"\"\"' recfm=F encoding=binary lrecl=1 permission='\"\"\"+permission+\"\"\"';\n data _null_;\n fid = fopen('_sp_updn', 'O');\n if fid then\n rc = fclose(fid);\n run;\n filename _sp_updn;\n \"\"\"\n\n ll2 = self.submit(code, 'text')\n fd.close()\n\n return {'Success' : True, \n 'LOG' : log1+ll2['LOG']}",
"def upload_slow(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):\n valid = self._sb.file_info(remotefile, quiet = True)\n\n if valid is None:\n remf = remotefile\n else:\n if valid == {}:\n remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]\n else:\n remf = remotefile\n if overwrite == False:\n return {'Success' : False, \n 'LOG' : \"File \"+str(remotefile)+\" exists and overwrite was set to False. Upload was stopped.\"}\n\n try:\n fd = open(localfile, 'rb')\n except OSError as e:\n return {'Success' : False, \n 'LOG' : \"File \"+str(localfile)+\" could not be opened. Error was: \"+str(e)}\n\n code = \"\"\"\n filename saspydir '\"\"\"+remf+\"\"\"' recfm=F encoding=binary lrecl=1 permission='\"\"\"+permission+\"\"\"';\n data _null_;\n file saspydir; \n infile datalines;\n input;\n if _infile_ = '' then delete;\n lin = length(_infile_);\n outdata = inputc(_infile_, '$hex.', lin);\n lout = lin/2;\n put outdata $varying80. lout; \n datalines4;\"\"\"\n\n buf = fd.read1(40)\n if len(buf):\n self._asubmit(code, \"text\")\n else:\n code = \"\"\"\n filename saspydir '\"\"\"+remf+\"\"\"' recfm=F encoding=binary lrecl=1 permission='\"\"\"+permission+\"\"\"';\n data _null_;\n fid = fopen('saspydir', 'O');\n if fid then\n rc = fclose(fid);\n run;\\n\"\"\"\n\n ll = self.submit(code, 'text')\n fd.close()\n return {'Success' : True, \n 'LOG' : ll['LOG']}\n\n while len(buf):\n buf2 = ''\n for i in range(len(buf)):\n buf2 += '%02x' % buf[i]\n ll = self._asubmit(buf2, 'text')\n buf = fd.read1(40)\n\n self._asubmit(\";;;;\", \"text\")\n ll = self.submit(\"run;\\nfilename saspydir;\", 'text')\n fd.close()\n\n return {'Success' : True, \n 'LOG' : ll['LOG']}",
"def test_retrieve_files_move_existing_file(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n os.mkdir('/tmp/localhost')\n\n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n result_3 = os.path.isdir('/tmp/localhost.%s' % strftime('%H%M%s'))\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_3)\n self.assertTrue(result_2)\n self.assertTrue(result_1)",
"def storage_file_upload_batch(cmd, client, destination, source, destination_path=None, pattern=None, dryrun=False,\n validate_content=False, content_settings=None, max_connections=1, metadata=None,\n progress_callback=None):\n\n from ..util import glob_files_locally, normalize_blob_file_path, guess_content_type\n from ..track2_util import make_file_url\n\n source_files = [c for c in glob_files_locally(source, pattern)]\n logger = get_logger(__name__)\n settings_class = cmd.get_models('_models#ContentSettings')\n\n if dryrun:\n logger.info('upload files to file share')\n logger.info(' account %s', client.account_name)\n logger.info(' share %s', destination)\n logger.info(' total %d', len(source_files))\n return [{'File': make_file_url(client, os.path.dirname(dst) or None, os.path.basename(dst)),\n 'Type': guess_content_type(src, content_settings, settings_class).content_type} for src, dst in\n source_files]\n\n # TODO: Performance improvement\n # 1. Upload files in parallel\n def _upload_action(src, dst):\n dst = normalize_blob_file_path(destination_path, dst)\n dir_name = os.path.dirname(dst)\n file_name = os.path.basename(dst)\n\n _make_directory_in_files_share(client, dir_name)\n\n logger.warning('uploading %s', src)\n\n storage_file_upload(client.get_file_client(dst), src, content_settings, metadata, validate_content,\n progress_callback, max_connections)\n\n return make_file_url(client, dir_name, file_name)\n\n return list(_upload_action(src, dst) for src, dst in source_files)",
"def upload(outfile, outdir):\n outpath = outdir + \"/\" + outfile\n my_env = os.environ.copy()\n my_env[\"X509_USER_PROXY\"] = dst_cred\n for retry in range(0,99):\n try:\n subprocess.check_output([\"globus-url-copy\", \"-create-dest\",\n \"-rst\", \"-stall-timeout\", \"300\",\n \"-ds\", dst_dn, \"-dst-cred\", dst_cred,\n \"file://\" + os.getcwd() + \"/\" + outfile,\n dst_url + outpath], env=my_env)\n return 0\n except:\n continue\n subprocess.check_output([\"globus-url-copy\", \"-create-dest\",\n \"-rst\", \"-stall-timeout\", \"300\",\n \"-ds\", dst_dn, \"-dst-cred\", dst_cred,\n \"file://\" + os.getcwd() + \"/\" + outfile,\n dst_url + outpath], env=my_env)\n return 0",
"def test_prep_sffs_in_dir(self):\r\n prep_sffs_in_dir(self.sff_dir, self.sff_dir, make_flowgram=True)\r\n prep_sffs_in_dir(self.gz_sff_dir, self.gz_sff_dir, make_flowgram=True)\r\n\r\n fna_fp = os.path.join(self.sff_dir, 'test.fna')\r\n fna_gz_fp = os.path.join(self.gz_sff_dir, 'test_gz.fna')\r\n self.assertEqual(open(fna_fp).read(), fna_txt)\r\n self.assertEqual(open(fna_gz_fp).read(), fna_txt)\r\n\r\n qual_fp = os.path.join(self.sff_dir, 'test.qual')\r\n qual_gz_fp = os.path.join(self.gz_sff_dir, 'test_gz.qual')\r\n self.assertEqual(open(qual_fp).read(), qual_txt)\r\n self.assertEqual(open(qual_gz_fp).read(), qual_txt)\r\n\r\n flow_fp = os.path.join(self.sff_dir, 'test.txt')\r\n flow_gz_fp = os.path.join(self.gz_sff_dir, 'test_gz.txt')\r\n self.assertEqual(open(flow_fp).read(), flow_txt)\r\n self.assertEqual(open(flow_gz_fp).read(), flow_txt)",
"def test_overwrite_corrupted_files(overwrite_on_tape_topology, core_config_mock, caches_mock):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=True, did2_corrupted=True)\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n class _FTSWrapper(FTSWrapper):\n @staticmethod\n def on_receive(job_params):\n for job in (job_params if isinstance(job_params, list) else [job_params]):\n for file in job.get('files', []):\n if (file.get('file_metadata', {}).get('dst_type') == 'TAPE'\n and file.get('file_metadata', {}).get('dst_file', {}).get('file_on_tape') is not None):\n # Fake that dst_file metadata contains file_on_tape == True\n # As we don't really have tape RSEs in our tests, file_on_tape is always false\n file['file_metadata']['dst_file']['file_on_tape'] = True\n return job_params\n\n with patch('rucio.daemons.conveyor.poller.FTS3Transfertool', _FTSWrapper):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Both transfers must be marked as failed because the file size is incorrect\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit the failed requests. They must fail again, because overwrite_corrupted_files is False\n # 2 runs: for multihop, finisher works one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Set overwrite to True before running the poller or finisher\n core_config.set('transfers', 'overwrite_corrupted_files', True)\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit one more time. Now the destination file must be overwritten\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'",
"def upload(args):\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To upload a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\"Expected source ({}) to be a directory when \"\n \"using recursive mode.\".format(args.source))\n\n # local name of the directory that is being uploaded\n _, dir_name = os.path.split(args.source)\n\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n # build the remote path + fname\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force,\n update=args.update)\n\n else:\n with open(args.source, 'rb') as fp:\n store.create_file(remote_path, fp, force=args.force,\n update=args.update)",
"def send_data(self, fp, dest: PathLike, force: bool = False):",
"def FilePut(self, source_paths: list, remote_destination: str):\n lastChar = remote_destination[len(remote_destination)-1]\n if lastChar != '/':\n remote_destination += '/'\n\n try:\n paths = [p for pat in source_paths for p in self.expandPath(pat)]\n g = self.fileChunkGenerator(paths, True, remote_destination)\n status = self.filemanager.Put(g)\n print('# Copied {} files'.format(status.total_files))\n print('# Copied {} bytes'.format(status.total_bytes))\n except grpc.RpcError as e:\n status_code = e.code() # status_code.name and status_code.value\n if grpc.StatusCode.NOT_FOUND == status_code:\n raise FileNotFoundError(e.details()) from e\n else:\n # pass any other gRPC errors to user\n raise e",
"def _put(self, src_fname, dst_fname):\n logging.info('Transferring file %s to %s', src_fname, self._ip_addr)\n sftp_cli = self._get_sftp_client()\n if sftp_cli is None:\n raise Exception('Not supported without ssh.')\n return sftp_cli.put(src_fname, dst_fname)",
"def _staf_file_copy(self, local_path, remote_path, overwrite=True, is_text_file=False):\n\n staf_create_request = ('CREATE DIRECTORY \"{0}\" '\n 'FULLPATH'.format(unix_style_path(dirname(remote_path))))\n\n result = self._staf_handle.submit(self._sut.network_address, 'fs', staf_create_request)\n\n if result.rc != result.Ok:\n raise CoreError(result.Ok)\n\n staf_copy_request = ('COPY FILE \"{0}\" TOFILE \"{1}\" '\n 'TOMACHINE \"{2}\"'.format(unix_style_path(local_path),\n unix_style_path(remote_path),\n self._sut.network_address))\n if is_text_file:\n staf_copy_request += ' TEXT'\n\n if not overwrite:\n staf_copy_request += ' FAILIFEXISTS'\n\n result = self._staf_handle.submit('local', 'fs', staf_copy_request)\n\n if result.rc != result.Ok:\n raise CoreError(result.result)",
"def send_dir(self, src: PathLike, dest: PathLike, force: bool = False):",
"def test_put_raises_on_overwriting(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src1 = os.path.join(uploads, 'demo-test.tar.gz')\n src2 = os.path.join(uploads, 'test.jpg')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put_variant(src1, id, 'demo-test.tar.gz')\n with assert_raises(x.FileExists):\n backend.put_variant(src2, id, 'demo-test.tar.gz')",
"def test_move_overwrite(remote,AB,all_):\n testpath = os.path.join(os.path.abspath(os.path.split(__file__)[0]),\n 'test_dirs','pp','test_move_overwrite')\n try:\n shutil.rmtree(testpath)\n except:\n pass\n os.makedirs(testpath)\n testutil = testutils.Testutils(testpath=testpath)\n\n # Init\n testutil.write('A/fileA0',text='fileA0')\n testutil.write('A/fileB0',text='fileB0')\n\n # copy over\n testutil.copy_tree()\n\n # Start it\n config = testutil.get_config(remote=remote)\n testutil.init(config)\n\n # Apply actions\n testutil.write('A/fileA1',text='fileA1')\n testutil.move('A/fileA0','A/fileB1')\n\n testutil.write('B/fileB1',text='fileB1')\n testutil.move('B/fileB0','B/fileA1')\n\n # Sync\n if AB == 'A':\n mode = 'push'\n else:\n mode='pull'\n\n if all_:\n mode += '_all'\n\n testutil.run(config,mode=mode)\n\n # Check it -- Only need to check A\n diff = testutil.compare_tree()\n\n if all_:\n assert len(diff) == 0\n # In the end, all files are either moved or overwritten. We do not\n # expect there to be any differences\n elif AB == 'A': # Check backups in B\n assert diff == [('missing_inB', 'fileB0')] # Never gets pushed\n \n elif AB == 'B': # Check backups in B\n assert diff == [('missing_inA', 'fileA0')] # Never gets pulled",
"def moveFile(src, dest, bak=\"bak\"):\n\t\n\tmessage = \"processing: {0} -> {1}\".format(src, dest)\n\tlogger.info(message)\n\n\t#compare the source and destination, if the files are the same do nothing\n\tif os.path.exists(src) and os.path.exists(dest): \n\t\tmessage = \"file {0} found, comparing to {1}\".format(src, dest)\n\t\tlogger.info(message)\n\t\t(fileCheck, fileSig) = verifyFile(src, dest)\n\t\tif fileCheck:\n\t\t\tmessage = \"source file {0} matches destination file {1}\".format(src, dest)\n\t\t\tlogger.info(message)\n\t\t\treturn True\n\t\t\n\t#checks to see if the destination file exists, then creates a backup of it\n\tif os.path.exists(dest):\n\t\tbackupFileName = \"{0}.{1}\".format(dest, bak)\n\t\tmessage = \"file {0} exists, creating backup: {1}\".format(dest, backupFileName)\n\t\tlogger.info(message)\n\t\ttry:\n\t\t\tshutil.move(dest, backupFileName)\n\t\texcept IOError as errorMessage:\n\t\t\tlogger.error(errorMessage)\n\t\t\treturn False\n\t\t\n\t#attempts to copy the source file to the destination, \n\tif os.path.exists(src):\n\t\tmessage = \"copying {0} to {1})\".format(src, dest)\n\t\ttry:\n\t\t\tshutil.copy(src, dest)\n\t\texcept IOError as errorMessage:\n\t\t\tlogger.error(errorMessage)\n\t\t\tshutil.move(backupFilenName, dest)\n\t\t\treturn False\n\t\t\n\t#verify that files are the same\n\t(fileCheck, fileSig) = verifyFile(src, dest)\n\tif fileCheck:\n\t\tmessage = \"File transfer verified {0} -> {1}\".format(src, dest)\n\t\tlogger.info(message)\n\t\tmessage = \"File Signature for {0}: {1}\".format(src, fileSig)\n\t\tlogger.info(message)\n\t\treturn True\n\telse:\n\t\tmessage = \"file signatures do not match, rolling back {0} -> {1}\".format(backupFileName, dest)\n\t\tlogger.error(message)\n\t\n\t#roll back file\n\ttry:\n\t\tshutil.move(backupFileName, dest)\n\texcept IOError as errorMessage:\n\t\tlogger.error(errorMessage)\n\t\treturn False\n\t\n\treturn True",
"def test_force_put_to_overwrite_existing(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n filename = 'demo-test.tar.gz'\n src1 = os.path.join(uploads, filename)\n src2 = os.path.join(uploads, 'test.jpg')\n id = utils.generate_id(filename)\n backend.put_variant(src1, id, filename)\n backend.put_variant(src2, id, filename, True)\n\n path = '/'.join(backend.id_to_path(id)) + '/' + filename\n client = boto3.client('s3', **backend.credentials)\n res = client.head_object(Bucket=backend.bucket_name, Key=path)\n self.assertEquals(\n str(os.path.getsize(src2)),\n str(res['ResponseMetadata']['HTTPHeaders']['content-length'])\n )",
"def bulk_upload ( server, identity, src_dir, tgt_dir ) :\n tmp_tarfilepath = '/tmp/'\n tmp_tarfilename = server + '.tar.gz'\n tmp_file = tmp_tarfilepath + tmp_tarfilename\n\n # Tar up the src directory\n s = subprocess.call( [ '/bin/sh', '-c',\n 'cd ' + src_dir + ' && tar czf ' + tmp_file + ' .' ] )\n if s != 0 :\n print 'Unable to upload files.'\n return s\n\n # Copy the tar file up to the server\n s = scp_call( server, identity, tmp_file, tmp_tarfilepath )\n if s != 0 :\n print 'Unable to upload files.'\n return s\n\n # Unpack the tar file on the server\n s = ssh_call( server,\n identity,\n 'cd ' + tgt_dir + ' && sudo tar xzf ' + tmp_file + ' && rm ' + tmp_file + ' && sudo chown -R root:root *' )\n return s",
"def upload(self, source, dest):\n if os.path.isdir(source):\n self.upload_dir(source, dest)\n else:\n self.upload_file(source, dest)",
"def _put(self, source_path, remote_filename):\n\n quota = self.http_client.get(self.metadata_url + 'account/quota')\n quota.raise_for_status()\n available = quota.json()['available']\n\n source_size = os.path.getsize(source_path.name)\n\n if source_size > available:\n raise BackendException(\n 'Out of space: trying to store \"%s\" (%d bytes), but only '\n '%d bytes available on Amazon Drive.' % (\n source_path.name, source_size, available))\n\n # Just check the cached list, to avoid _list for every new file being\n # uploaded\n if remote_filename in self.names_to_ids:\n log.Debug('File %s seems to already exist on Amazon Drive. Deleting '\n 'before attempting to upload it again.' % remote_filename)\n self._delete(remote_filename)\n\n metadata = {'name': remote_filename, 'kind': 'FILE',\n 'parents': [self.backup_target_id]}\n headers = {'Content-Type': 'multipart/form-data; boundary=%s'\n % self.MULTIPART_BOUNDARY}\n data = self.multipart_stream(metadata, source_path)\n\n response = self.http_client.post(\n self.content_url + 'nodes?suppress=deduplication',\n data=data,\n headers=headers)\n\n if response.status_code == 409: # \"409 : Duplicate file exists.\"\n self.raise_for_existing_file(remote_filename)\n elif response.status_code == 201:\n log.Debug('%s uploaded successfully' % remote_filename)\n elif response.status_code == 408 or response.status_code == 504:\n log.Info('%s upload failed with timeout status code=%d. Speculatively '\n 'waiting for %d seconds to see if Amazon Drive finished the '\n 'upload anyway' % (remote_filename, response.status_code,\n globals.timeout))\n tries = globals.timeout / 15\n while tries >= 0:\n tries -= 1\n time.sleep(15)\n\n remote_size = self._query(remote_filename)['size']\n if source_size == remote_size:\n log.Debug('Upload turned out to be successful after all.')\n return\n elif remote_size == -1:\n log.Debug('Uploaded file is not yet there, %d tries left.'\n % (tries + 1))\n continue\n else:\n self.raise_for_existing_file(remote_filename)\n raise BackendException('%s upload failed and file did not show up '\n 'within time limit.' % remote_filename)\n else:\n log.Debug('%s upload returned an undesirable status code %s'\n % (remote_filename, response.status_code))\n response.raise_for_status()\n\n parsed = response.json()\n if 'id' not in parsed:\n raise BackendException('%s was uploaded, but returned JSON does not '\n 'contain ID of new file. Retrying.\\nJSON:\\n\\n%s'\n % (remote_filename, parsed))\n\n # XXX: The upload may be considered finished before the file shows up\n # in the file listing. As such, the following is required to avoid race\n # conditions when duplicity calls _query or _list.\n self.names_to_ids[parsed['name']] = parsed['id']",
"def upload(self, remote, local, force = False):\n fl = self.list([ remote ])\n if force == False and remote in fl:\n remote_hash = fl[remote]\n h = hashlib.sha256()\n commonl.hash_file(h, local)\n if remote_hash == h.hexdigest():\n # remote hash is the same, no need to upload\n return\n\n with io.open(local, \"rb\") as inf:\n self.target.ttbd_iface_call(\"store\", \"file\", method = \"POST\",\n file_path = remote,\n files = { 'file': inf })",
"def copy(self, source_host, dest_host, filename):",
"def upload_file(self, source, destination, overwrite=True, parallelism=1,\n **kwargs):\n c = self.get_conn()\n c.upload(hdfs_path=destination,\n local_path=source,\n overwrite=overwrite,\n n_threads=parallelism,\n progress=self.progress,\n **kwargs)\n logging.debug(\"Uploaded file {} to {}\".format(source, destination))"
]
| [
"0.700196",
"0.6252557",
"0.62507075",
"0.6083093",
"0.5982149",
"0.5853024",
"0.58049345",
"0.5701598",
"0.56701756",
"0.5618603",
"0.5572697",
"0.5523261",
"0.5505248",
"0.5502287",
"0.5501598",
"0.55005914",
"0.5451989",
"0.54514116",
"0.5432832",
"0.54321486",
"0.54235744",
"0.54141587",
"0.5411655",
"0.5388962",
"0.5380316",
"0.5372131",
"0.5350238",
"0.53491455",
"0.5337232",
"0.5334615"
]
| 0.72678787 | 0 |
Ensure that overwrite is not set for transfers towards TAPE RSEs | def test_overwrite_on_tape(overwrite_on_tape_topology, caches_mock):
rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=False, did2_corrupted=True)
all_rses = [rse1_id, rse2_id, rse3_id]
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)
request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)
assert request['state'] == RequestState.FAILED
assert 'Destination file exists and overwrite is not enabled' in request['err_msg']
request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)
assert request['state'] == RequestState.FAILED
assert 'Destination file exists and overwrite is not enabled' in request['err_msg'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_overwrite_hops(overwrite_on_tape_topology, caches_mock, did_factory, file_factory):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=False, did2_corrupted=True)\n did_factory.upload_client.upload(\n [\n {\n 'path': file_factory.file_generator(size=3),\n 'rse': rse_core.get_rse_name(rse2_id),\n 'did_scope': did1['scope'].external,\n 'did_name': did1['name'],\n 'no_register': True,\n }\n ]\n )\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n fts_schema_version = FTS3Transfertool(external_host=TEST_FTS_HOST).version()['schema']['major']\n if fts_schema_version >= 8:\n # Newer fts version will honor the overwrite_hop\n request = __wait_for_state_transition(dst_rse_id=rse2_id, **did1)\n assert request['state'] == RequestState.DONE\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']\n else:\n # FTS only recently introduced the overwrite_hops parameter. It will be ignored on old\n # fts versions and the first hop will fail with the file exists error\n # TODO: remove this else after FTS 3.12 release and after updating rucio/fts container with the new release\n request = __wait_for_state_transition(dst_rse_id=rse2_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Unused hop in multi-hop' in request['err_msg']",
"async def test_preserved_other_overwrites_voice(self):\n for overwrite_json in ('{\"connect\": true, \"speak\": true}', None):\n with self.subTest(overwrite_json=overwrite_json):\n self.cog.previous_overwrites.get.return_value = overwrite_json\n\n prev_overwrite_dict = dict(self.voice_overwrite)\n await self.cog._unsilence(self.voice_channel)\n new_overwrite_dict = dict(self.voice_overwrite)\n\n # Remove these keys because they were modified by the unsilence.\n del prev_overwrite_dict[\"connect\"]\n del prev_overwrite_dict[\"speak\"]\n del new_overwrite_dict[\"connect\"]\n del new_overwrite_dict[\"speak\"]\n\n self.assertDictEqual(prev_overwrite_dict, new_overwrite_dict)",
"def overwrite_all ( self ):\n return self.value == self.OV_ALL",
"def is_overwrite_all(self):\n return self._tag == 'overwrite_all'",
"def _ensure_overwrite(settings):\n overwrite = False\n files = [settings.api_properties, settings.api_definition, settings.icon, settings.script, SETTINGS_FILE]\n existing_files = [file for file in files if os.path.exists(file)]\n if len(existing_files) > 0:\n msg = '{} file(s) exist. Do you want to overwrite?'.format(existing_files)\n overwrite = prompt_y_n(msg)\n if not overwrite:\n raise CLIError('{} files not overwritten.'.format(existing_files))\n\n return overwrite",
"def is_complete_overwrite(self) -> Optional[bool]:\n return pulumi.get(self, \"is_complete_overwrite\")",
"def overwrite_on_tape_topology(rse_factory, did_factory, root_account, vo, file_factory):\n\n rse1 = 'XRD1'\n rse1_id = rse_core.get_rse_id(rse=rse1, vo=vo)\n rse2 = 'XRD3'\n rse2_id = rse_core.get_rse_id(rse=rse2, vo=vo)\n rse3 = 'XRD4'\n rse3_id = rse_core.get_rse_id(rse=rse3, vo=vo)\n\n def __generate_and_upload_file(src_rse, dst_rse, simulate_dst_corrupted=False):\n \"\"\"\n Create and upload real files to source and destination. Don't register it on destination. This way, fts will fail if overwrite = False\n\n If simulate_dst_corrupted is True, will upload a different file to destination, to simulate that it is corrupted\n \"\"\"\n local_file = file_factory.file_generator()\n did = did_factory.random_file_did()\n did_factory.upload_test_file(src_rse, path=local_file, **did)\n did_factory.upload_client.upload(\n [\n {\n 'path': file_factory.file_generator(size=3) if simulate_dst_corrupted else local_file,\n 'rse': dst_rse,\n 'did_scope': did['scope'].external,\n 'did_name': did['name'],\n 'no_register': True,\n }\n ]\n )\n return did\n\n def __create_dids(did1_corrupted=True, did2_corrupted=True):\n \"\"\"\n Uploads two files:\n - one which requires multiple transfer hop to go to destination\n - one which can be transferred in one hop to destination rse\n \"\"\"\n # multihop transfer:\n did1 = __generate_and_upload_file(rse1, rse3, simulate_dst_corrupted=did1_corrupted)\n # direct transfer\n did2 = __generate_and_upload_file(rse2, rse3, simulate_dst_corrupted=did2_corrupted)\n rule_core.add_rule(dids=[did1, did2], account=root_account, copies=1, rse_expression=rse3, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n\n return rse1_id, rse2_id, rse3_id, did1, did2\n\n # Fake that destination RSE is a tape\n rse_core.update_rse(rse_id=rse3_id, parameters={'rse_type': RSEType.TAPE})\n try:\n rse_core.add_rse_attribute(rse3_id, 'archive_timeout', 60)\n yield __create_dids\n finally:\n rse_core.update_rse(rse_id=rse3_id, parameters={'rse_type': RSEType.DISK})\n rse_core.del_rse_attribute(rse3_id, 'archive_timeout')",
"async def test_cached_previous_overwrites(self):\n overwrite_json = (\n '{\"send_messages\": true, \"add_reactions\": false, \"create_private_threads\": true, '\n '\"create_public_threads\": false, \"send_messages_in_threads\": true}'\n )\n await self.cog._set_silence_overwrites(self.text_channel)\n self.cog.previous_overwrites.set.assert_awaited_once_with(self.text_channel.id, overwrite_json)",
"def test_setOverwrite(self):\n fp = FilePath(self.mktemp())\n fp.setContent(b\"I love contributing to Twisted!\")\n protocol = self.makeConnectedDccFileReceive(fp.path, overwrite=True)\n\n self.allDataReceivedForProtocol(protocol, b\"Twisted rocks!\")\n\n self.assertEqual(fp.getContent(), b\"Twisted rocks!\")",
"async def test_preserved_other_overwrites_text(self):\n for overwrite_json in ('{\"send_messages\": true, \"add_reactions\": null}', None):\n with self.subTest(overwrite_json=overwrite_json):\n self.cog.previous_overwrites.get.return_value = overwrite_json\n\n prev_overwrite_dict = dict(self.text_overwrite)\n await self.cog._unsilence(self.text_channel)\n new_overwrite_dict = dict(self.text_overwrite)\n\n # Remove these keys because they were modified by the unsilence.\n del prev_overwrite_dict[\"send_messages\"]\n del prev_overwrite_dict[\"add_reactions\"]\n del new_overwrite_dict[\"send_messages\"]\n del new_overwrite_dict[\"add_reactions\"]\n\n self.assertDictEqual(prev_overwrite_dict, new_overwrite_dict)",
"def test_overwrite_raises_an_error_by_default(self):\n set_default_for_missing_keys('test')\n\n with pytest.raises(ValueError) as e:\n set_default_for_missing_keys(None)\n\n # confirm that error message correctly indicates the fix/resolution\n assert 'pass `overwrite=True`' in str(e.value)",
"async def test_preserved_other_overwrites_voice(self):\n prev_overwrite_dict = dict(self.voice_overwrite)\n await self.cog._set_silence_overwrites(self.voice_channel)\n new_overwrite_dict = dict(self.voice_overwrite)\n\n # Remove 'connect' & 'speak' keys because they were changed by the method.\n del prev_overwrite_dict[\"connect\"]\n del prev_overwrite_dict[\"speak\"]\n del new_overwrite_dict[\"connect\"]\n del new_overwrite_dict[\"speak\"]\n\n self.assertDictEqual(prev_overwrite_dict, new_overwrite_dict)",
"async def test_restored_overwrites_voice(self):\n await self.cog._unsilence(self.voice_channel)\n self.voice_channel.set_permissions.assert_awaited_once_with(\n self.cog._verified_voice_role,\n overwrite=self.voice_overwrite,\n )\n\n # Recall that these values are determined by the fixture.\n self.assertTrue(self.voice_overwrite.connect)\n self.assertTrue(self.voice_overwrite.speak)",
"async def test_cache_miss_used_default_overwrites_voice(self):\n self.cog.previous_overwrites.get.return_value = None\n\n await self.cog._unsilence(self.voice_channel)\n self.voice_channel.set_permissions.assert_awaited_once_with(\n self.cog._verified_voice_role,\n overwrite=self.voice_overwrite,\n )\n\n self.assertIsNone(self.voice_overwrite.connect)\n self.assertIsNone(self.voice_overwrite.speak)",
"def check_overwrite(self, filename, workspace):\n if not self.overwrite.value and os.path.isfile(filename):\n try:\n return (\n workspace.interaction_request(\n self, workspace.measurements.image_set_number, filename\n )\n == \"Yes\"\n )\n except workspace.NoInteractionException:\n raise ValueError(\n 'SaveImages: trying to overwrite %s in headless mode, but Overwrite files is set to \"No\"'\n % (filename)\n )\n return True",
"def no_overwrite_example():",
"async def test_restored_overwrites_text(self):\n await self.cog._unsilence(self.text_channel)\n self.text_channel.set_permissions.assert_awaited_once_with(\n self.cog._everyone_role,\n overwrite=self.text_overwrite,\n )\n\n # Recall that these values are determined by the fixture.\n self.assertTrue(self.text_overwrite.send_messages)\n self.assertFalse(self.text_overwrite.add_reactions)",
"def test_overwrite_corrupted_files(overwrite_on_tape_topology, core_config_mock, caches_mock):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=True, did2_corrupted=True)\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n class _FTSWrapper(FTSWrapper):\n @staticmethod\n def on_receive(job_params):\n for job in (job_params if isinstance(job_params, list) else [job_params]):\n for file in job.get('files', []):\n if (file.get('file_metadata', {}).get('dst_type') == 'TAPE'\n and file.get('file_metadata', {}).get('dst_file', {}).get('file_on_tape') is not None):\n # Fake that dst_file metadata contains file_on_tape == True\n # As we don't really have tape RSEs in our tests, file_on_tape is always false\n file['file_metadata']['dst_file']['file_on_tape'] = True\n return job_params\n\n with patch('rucio.daemons.conveyor.poller.FTS3Transfertool', _FTSWrapper):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Both transfers must be marked as failed because the file size is incorrect\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit the failed requests. They must fail again, because overwrite_corrupted_files is False\n # 2 runs: for multihop, finisher works one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Set overwrite to True before running the poller or finisher\n core_config.set('transfers', 'overwrite_corrupted_files', True)\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit one more time. Now the destination file must be overwritten\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'",
"def sectional_overwrite_check(self):\n\n for rule in self.options['sectional_overwrite']:\n if self.lineage_test(rule):\n return True\n return False",
"def overwrite_url_allowed(self, overwrite_url_allowed):\n\n self._overwrite_url_allowed = overwrite_url_allowed",
"async def test_cache_miss_used_default_overwrites_text(self):\n self.cog.previous_overwrites.get.return_value = None\n\n await self.cog._unsilence(self.text_channel)\n self.text_channel.set_permissions.assert_awaited_once_with(\n self.cog._everyone_role,\n overwrite=self.text_overwrite,\n )\n\n self.assertIsNone(self.text_overwrite.send_messages)\n self.assertIsNone(self.text_overwrite.add_reactions)",
"def sectional_overwrite_no_negate_check(self):\n\n for rule in self.options[\n 'sectional_overwrite_no_negate']:\n if self.lineage_test(rule):\n return True\n return False",
"def test_transfer_inherit_demote(self):\n self.make_assignment(self.project, self.user_new, self.role_contributor)\n # Set category role for project owner\n self.make_assignment(self.category, self.user_owner, self.role_delegate)\n self.assertEqual(self.project.get_owner().user, self.user_owner)\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_new.username,\n 'old_owner_role': PROJECT_ROLE_CONTRIBUTOR,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_owner)\n self.assertEqual(\n self.project.get_role(self.user_new).role, self.role_contributor\n )",
"def ProcessTableCopyOverwrite(ref, args, request):\n del ref # Unused\n if args.overwrite:\n request.job.configuration.copy.writeDisposition = 'WRITE_TRUNCATE'\n return request",
"async def test_preserved_other_overwrites_text(self):\n prev_overwrite_dict = dict(self.text_overwrite)\n await self.cog._set_silence_overwrites(self.text_channel)\n new_overwrite_dict = dict(self.text_overwrite)\n\n # Remove related permission keys because they were changed by the method.\n for perm_name in (\n \"send_messages\",\n \"add_reactions\",\n \"create_private_threads\",\n \"create_public_threads\",\n \"send_messages_in_threads\"\n ):\n del prev_overwrite_dict[perm_name]\n del new_overwrite_dict[perm_name]\n\n self.assertDictEqual(prev_overwrite_dict, new_overwrite_dict)",
"def overwrite_original_file(self):\n return self.__overwrite_original_file",
"def test_overwrite(self):\n set_default_for_missing_keys('hello world')\n set_default_for_missing_keys(123, overwrite=True)\n\n assert DotWizPlus().missing_key == 123",
"def _should_bypass_reservation(self):\n should_bypass_reservation = super(StockMove, self)._should_bypass_reservation()\n if not should_bypass_reservation and self.is_subcontract:\n return True\n return should_bypass_reservation",
"def test_move_overwrite(remote,AB,all_):\n testpath = os.path.join(os.path.abspath(os.path.split(__file__)[0]),\n 'test_dirs','pp','test_move_overwrite')\n try:\n shutil.rmtree(testpath)\n except:\n pass\n os.makedirs(testpath)\n testutil = testutils.Testutils(testpath=testpath)\n\n # Init\n testutil.write('A/fileA0',text='fileA0')\n testutil.write('A/fileB0',text='fileB0')\n\n # copy over\n testutil.copy_tree()\n\n # Start it\n config = testutil.get_config(remote=remote)\n testutil.init(config)\n\n # Apply actions\n testutil.write('A/fileA1',text='fileA1')\n testutil.move('A/fileA0','A/fileB1')\n\n testutil.write('B/fileB1',text='fileB1')\n testutil.move('B/fileB0','B/fileA1')\n\n # Sync\n if AB == 'A':\n mode = 'push'\n else:\n mode='pull'\n\n if all_:\n mode += '_all'\n\n testutil.run(config,mode=mode)\n\n # Check it -- Only need to check A\n diff = testutil.compare_tree()\n\n if all_:\n assert len(diff) == 0\n # In the end, all files are either moved or overwritten. We do not\n # expect there to be any differences\n elif AB == 'A': # Check backups in B\n assert diff == [('missing_inB', 'fileB0')] # Never gets pushed\n \n elif AB == 'B': # Check backups in B\n assert diff == [('missing_inA', 'fileA0')] # Never gets pulled",
"def test_overwrites(self):\n\n extra_con = set([Constraint('fake', ['OVERWRITE'])])\n the_process_unit = ProcessUnit([self.a_pattern_ds], '/%fake%/%file%/%pattern%.txt',\n 'echo', extra_constraints=extra_con)\n\n ds_result = the_process_unit.execute(simulate=True)\n\n expected_in_cons = set([Constraint('fake', ['fake_1']),\n Constraint('file', ['file_1']),\n Constraint('pattern', ['pattern_1'])])\n expected_out_cons = set([Constraint('fake', ['OVERWRITE']),\n Constraint('file', ['file_1']),\n Constraint('pattern', ['pattern_1'])])\n\n self.assertEqual(expected_in_cons, self.a_pattern_ds.constraints)\n self.assertEqual(expected_out_cons, ds_result.constraints)"
]
| [
"0.61044496",
"0.6002369",
"0.59951496",
"0.5960979",
"0.5931762",
"0.5837875",
"0.58369833",
"0.58320355",
"0.57500327",
"0.5735235",
"0.5722755",
"0.571563",
"0.5699335",
"0.5663631",
"0.5567735",
"0.5546666",
"0.5525861",
"0.5514419",
"0.54601634",
"0.5448126",
"0.54353076",
"0.5422771",
"0.5388919",
"0.5383282",
"0.536698",
"0.53656733",
"0.53456277",
"0.5262732",
"0.52308285",
"0.5228192"
]
| 0.6298048 | 0 |
Ensure that we request overwrite of intermediate hops on multihop transfers towards TAPE RSEs | def test_overwrite_hops(overwrite_on_tape_topology, caches_mock, did_factory, file_factory):
rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=False, did2_corrupted=True)
did_factory.upload_client.upload(
[
{
'path': file_factory.file_generator(size=3),
'rse': rse_core.get_rse_name(rse2_id),
'did_scope': did1['scope'].external,
'did_name': did1['name'],
'no_register': True,
}
]
)
all_rses = [rse1_id, rse2_id, rse3_id]
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)
fts_schema_version = FTS3Transfertool(external_host=TEST_FTS_HOST).version()['schema']['major']
if fts_schema_version >= 8:
# Newer fts version will honor the overwrite_hop
request = __wait_for_state_transition(dst_rse_id=rse2_id, **did1)
assert request['state'] == RequestState.DONE
request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)
assert request['state'] == RequestState.FAILED
assert 'Destination file exists and overwrite is not enabled' in request['err_msg']
else:
# FTS only recently introduced the overwrite_hops parameter. It will be ignored on old
# fts versions and the first hop will fail with the file exists error
# TODO: remove this else after FTS 3.12 release and after updating rucio/fts container with the new release
request = __wait_for_state_transition(dst_rse_id=rse2_id, **did1)
assert request['state'] == RequestState.FAILED
assert 'Destination file exists and overwrite is not enabled' in request['err_msg']
request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)
assert request['state'] == RequestState.FAILED
assert 'Unused hop in multi-hop' in request['err_msg'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_overwrite_corrupted_files(overwrite_on_tape_topology, core_config_mock, caches_mock):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=True, did2_corrupted=True)\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n class _FTSWrapper(FTSWrapper):\n @staticmethod\n def on_receive(job_params):\n for job in (job_params if isinstance(job_params, list) else [job_params]):\n for file in job.get('files', []):\n if (file.get('file_metadata', {}).get('dst_type') == 'TAPE'\n and file.get('file_metadata', {}).get('dst_file', {}).get('file_on_tape') is not None):\n # Fake that dst_file metadata contains file_on_tape == True\n # As we don't really have tape RSEs in our tests, file_on_tape is always false\n file['file_metadata']['dst_file']['file_on_tape'] = True\n return job_params\n\n with patch('rucio.daemons.conveyor.poller.FTS3Transfertool', _FTSWrapper):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Both transfers must be marked as failed because the file size is incorrect\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit the failed requests. They must fail again, because overwrite_corrupted_files is False\n # 2 runs: for multihop, finisher works one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # Set overwrite to True before running the poller or finisher\n core_config.set('transfers', 'overwrite_corrupted_files', True)\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit one more time. Now the destination file must be overwritten\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.QUEUED\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'\n request = request_core.get_request_by_did(rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.SUBMITTED\n assert __wait_for_fts_state(request, expected_state='ARCHIVING') == 'ARCHIVING'",
"def test_multihop_intermediate_replica_lifecycle(vo, did_factory, root_account, core_config_mock, caches_mock, metrics_mock):\n src_rse1_name = 'XRD1'\n src_rse1_id = rse_core.get_rse_id(rse=src_rse1_name, vo=vo)\n src_rse2_name = 'XRD2'\n src_rse2_id = rse_core.get_rse_id(rse=src_rse2_name, vo=vo)\n jump_rse_name = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse_name, vo=vo)\n dst_rse_name = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse_name, vo=vo)\n\n all_rses = [src_rse1_id, src_rse2_id, jump_rse_id, dst_rse_id]\n did = did_factory.upload_test_file(src_rse1_name)\n\n # Copy replica to a second source. To avoid the special case of having a unique last replica, which could be handled in a special (more careful) way\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=src_rse2_name, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=src_rse2_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n rse_core.set_rse_limits(rse_id=jump_rse_id, name='MinFreeSpace', value=1)\n rse_core.set_rse_usage(rse_id=jump_rse_id, source='storage', used=1, free=0)\n try:\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse_name, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n\n # Submit transfers to FTS\n # Ensure a replica was created on the intermediary host with epoch tombstone\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n assert request['state'] == RequestState.SUBMITTED\n replica = replica_core.get_replica(rse_id=jump_rse_id, **did)\n assert replica['tombstone'] == datetime(year=1970, month=1, day=1)\n assert replica['state'] == ReplicaState.COPYING\n\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # Fake an existing unused source with raking of 0 for the second source.\n # The ranking of this source should remain at 0 till the end.\n\n @transactional_session\n def __fake_source_ranking(*, session=None):\n models.Source(request_id=request['id'],\n scope=request['scope'],\n name=request['name'],\n rse_id=src_rse2_id,\n dest_rse_id=request['dest_rse_id'],\n ranking=0,\n bytes=request['bytes'],\n url=None,\n is_using=False). 
\\\n save(session=session, flush=False)\n\n __fake_source_ranking()\n\n # The intermediate replica is protected by its state (Copying)\n rucio.daemons.reaper.reaper.REGION.invalidate()\n reaper(once=True, rses=[], include_rses=jump_rse_name, exclude_rses=None)\n replica = replica_core.get_replica(rse_id=jump_rse_id, **did)\n assert replica['state'] == ReplicaState.COPYING\n\n # Wait for the intermediate replica to become ready\n replica = __wait_for_replica_transfer(dst_rse_id=jump_rse_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n # ensure tha the ranking was correct for all sources and intermediate rses\n assert __get_source(request_id=request['id'], src_rse_id=src_rse1_id, **did).ranking == 0\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == 0\n assert __get_source(request_id=request['id'], src_rse_id=src_rse2_id, **did).ranking == 0\n # Only group_bulk=1 part of the path was submitted.\n # run submitter again to copy from jump rse to destination rse\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n # Wait for the destination replica to become ready\n replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n rucio.daemons.reaper.reaper.REGION.invalidate()\n reaper(once=True, rses=[], include_rses='test_container_xrd=True', exclude_rses=None)\n\n with pytest.raises(ReplicaNotFound):\n replica_core.get_replica(rse_id=jump_rse_id, **did)\n\n # 3 request: copy to second source + 2 hops (each separately)\n # Use inequalities, because there can be left-overs from other tests\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 3\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_common_submit_transfer_total') >= 3\n # at least the failed hop\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_finisher_handle_requests_total') > 0\n finally:\n\n @transactional_session\n def _cleanup_all_usage_and_limits(rse_id, *, session=None):\n session.query(models.RSELimit).filter_by(rse_id=rse_id).delete()\n session.query(models.RSEUsage).filter_by(rse_id=rse_id, source='storage').delete()\n\n _cleanup_all_usage_and_limits(rse_id=jump_rse_id)",
"def test_overwrite_on_tape(overwrite_on_tape_topology, caches_mock):\n rse1_id, rse2_id, rse3_id, did1, did2 = overwrite_on_tape_topology(did1_corrupted=False, did2_corrupted=True)\n all_rses = [rse1_id, rse2_id, rse3_id]\n\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did1)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']\n request = __wait_for_state_transition(dst_rse_id=rse3_id, **did2)\n assert request['state'] == RequestState.FAILED\n assert 'Destination file exists and overwrite is not enabled' in request['err_msg']",
"def test_two_multihops_same_intermediate_rse(rse_factory, did_factory, root_account, core_config_mock, caches_mock):\n # +------+ +------+ +------+ +------+ +------+\n # | | | | | | | | | |\n # | RSE1 +--->| RSE2 +--->| RSE3 +--->| RSE4 +--->| RSE5 |\n # | | | | | | | | | |\n # +------+ +------+ +---+--+ +------+ +------+\n # |\n # | +------+ +------+\n # | | | | |\n # +------>| RSE6 +--->| RSE7 |\n # | | | |\n # +------+ +------+\n _, _, reaper_cache_region = caches_mock\n rse1, rse1_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse2, rse2_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse3, rse3_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse4, rse4_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse5, rse5_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse6, rse6_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n rse7, rse7_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default')\n all_rses = [rse1_id, rse2_id, rse3_id, rse4_id, rse5_id, rse6_id, rse7_id]\n for rse_id in all_rses:\n rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)\n rse_core.set_rse_limits(rse_id=rse_id, name='MinFreeSpace', value=1)\n rse_core.set_rse_usage(rse_id=rse_id, source='storage', used=1, free=0)\n distance_core.add_distance(rse1_id, rse2_id, distance=10)\n distance_core.add_distance(rse2_id, rse3_id, distance=10)\n distance_core.add_distance(rse3_id, rse4_id, distance=10)\n distance_core.add_distance(rse4_id, rse5_id, distance=10)\n distance_core.add_distance(rse3_id, rse6_id, distance=10)\n distance_core.add_distance(rse6_id, rse7_id, distance=10)\n\n did = did_factory.upload_test_file(rse1)\n rule_core.add_rule(dids=[did], account=root_account, copies=2, rse_expression=f'{rse5}|{rse7}', grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n\n class _FTSWrapper(FTSWrapper):\n @staticmethod\n def on_submit(file):\n # Simulate using the mock gfal plugin a transfer failure\n file['sources'] = [set_query_parameters(s_url, {'errno': 2}) for s_url in file['sources']]\n\n # Submit the first time, but force a failure to verify that retries are correctly handled\n with patch('rucio.core.transfer.TRANSFERTOOL_CLASSES_BY_NAME', new={'fts3': _FTSWrapper}):\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=rse2_id, **did)\n assert request['state'] == RequestState.FAILED\n\n # Re-submit the transfer without simulating a failure. Everything should go as normal starting now.\n for _ in range(4):\n # for multihop, finisher works one hop at a time. 
4 is the maximum number of hops in this test graph\n finisher(once=True, partition_wait_time=0)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n # one request must be submitted, but the second will only be queued\n if request_core.get_request_by_did(rse_id=rse5_id, **did)['state'] == RequestState.QUEUED:\n rse_id_second_to_last_queued, rse_id_queued = rse4_id, rse5_id\n rse_id_second_to_last_submit, rse_id_submitted = rse6_id, rse7_id\n else:\n rse_id_second_to_last_queued, rse_id_queued = rse6_id, rse7_id\n rse_id_second_to_last_submit, rse_id_submitted = rse4_id, rse5_id\n request = request_core.get_request_by_did(rse_id=rse_id_queued, **did)\n assert request['state'] == RequestState.QUEUED\n request = request_core.get_request_by_did(rse_id=rse_id_submitted, **did)\n assert request['state'] == RequestState.SUBMITTED\n\n # Calling submitter again will not unblock the queued requests\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=rse_id_submitted, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n request = request_core.get_request_by_did(rse_id=rse_id_queued, **did)\n assert request['state'] == RequestState.QUEUED\n\n # Once the submitted transfer is done, the submission will continue for second request (one hop at a time)\n # First of the remaining two hops submitted\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=rse_id_second_to_last_queued, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n # One of the intermediate replicas is eligible for deletion. Others are blocked by entries in source table\n reaper_cache_region.invalidate()\n reaper(once=True, rses=[], include_rses='|'.join([rse2, rse3, rse4, rse6]), exclude_rses=None)\n with pytest.raises(ReplicaNotFound):\n replica_core.get_replica(rse_id=rse_id_second_to_last_submit, **did)\n for rse_id in [rse2_id, rse3_id, rse_id_second_to_last_queued]:\n replica_core.get_replica(rse_id=rse_id, **did)\n\n # Final hop\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=10, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=rse_id_queued, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n # All intermediate replicas can be deleted\n reaper_cache_region.invalidate()\n reaper(once=True, rses=[], include_rses='|'.join([rse2, rse3, rse4, rse6]), exclude_rses=None)\n for rse_id in [rse2_id, rse3_id, rse4_id, rse6_id]:\n with pytest.raises(ReplicaNotFound):\n replica_core.get_replica(rse_id=rse_id, **did)",
"def ocp_is_tripped(self):\n self._raise_not_implemented()",
"def move_orders_flow_shop():\n # All orders from order_pool to WIP_A\n # All orders from WIP_A to M_A\n # if product_type 1,2,3 then to WIP_B\n # if product type 4,5,6 then to WIP_C\n # all from WIP_B to M_B\n # all from WIP_C to M_C\n # if product type 1 or 4, then to WIP_D\n # if product type 2 or 5, then to WIP_E\n # if product type 3 or 6, then to WIP_F\n # all to FGI\n\n ##################### Step 1: empty the machines that have finished production in the previous step\n\n # Move order from machine_A to WIP_B or WIP_C, if processing_time_remaining of order is 0\n if len(environment.machine_A.orders_inside_the_machine) == 1:\n if environment.machine_A.orders_inside_the_machine[0].processing_time_remaining <= 0:\n environment.machine_A.orders_inside_the_machine[0].arrival_prodstep_2_wip = global_settings.current_time\n\n if environment.machine_A.orders_inside_the_machine[0].product_type in (1, 2, 3):\n if global_settings.show_machine_output == True:\n print(\"Step \" + str(global_settings.current_time) + \": Machine_A: order finished. orderID: \" +\n str(environment.machine_A.orders_inside_the_machine[0].orderID) + \" || product type: \"\n + str(environment.machine_A.orders_inside_the_machine[0].product_type))\n environment.wip_B.append(environment.machine_A.orders_inside_the_machine.pop(0))\n\n elif environment.machine_A.orders_inside_the_machine[0].product_type in (4, 5, 6):\n if global_settings.show_machine_output == True:\n print(\"Step \" + str(global_settings.current_time) + \": Machine_A: order finished. orderID: \" +\n str(environment.machine_A.orders_inside_the_machine[0].orderID) + \" || product type: \"\n + str(environment.machine_A.orders_inside_the_machine[0].product_type))\n environment.wip_C.append(environment.machine_A.orders_inside_the_machine.pop(0))\n else:\n raise ValueError(\"No product_type assigned in machine A\")\n\n # Move order from machine_B to WIP D/E/F, depending on product type\n list_of_product_types = [1, 2, 3]\n list_of_wips = [environment.wip_D, environment.wip_E, environment.wip_F]\n orders = environment.machine_B.orders_inside_the_machine\n if len(orders) == 1:\n if orders[0].processing_time_remaining <= 0:\n orders[0].arrival_prodstep_3_wip = global_settings.current_time\n for product_Type in list_of_product_types:\n if orders[0].product_type == product_Type:\n if global_settings.show_machine_output == True:\n print(\"Step \" + str(global_settings.current_time) + \": Machine_B: order finished. \" +\n \"orderID: \" + str(orders[0].orderID) +\n \" || product type: \" + str(orders[0].product_type))\n list_of_wips[list_of_product_types.index(product_Type)].append(orders.pop(0))\n break\n\n # # Move order from machine_C to WIP D/E/F, depending on product type\n list_of_product_types = [4, 5, 6]\n list_of_wips = [environment.wip_D, environment.wip_E, environment.wip_F]\n orders = environment.machine_C.orders_inside_the_machine\n if len(orders) == 1:\n if orders[0].processing_time_remaining <= 0:\n orders[0].arrival_prodstep_3_wip = global_settings.current_time\n for product_Type in list_of_product_types:\n if orders[0].product_type == product_Type:\n if global_settings.show_machine_output == True:\n print(\"Step \" + str(global_settings.current_time) + \": Machine_C: order finished. 
\" +\n \"orderID: \" + str(orders[0].orderID) +\n \" || product type: \" + str(orders[0].product_type))\n list_of_wips[list_of_product_types.index(product_Type)].append(orders.pop(0))\n break\n\n # Move order from machine_D/E/F to FGI\n list_of_machines = [environment.machine_D, environment.machine_E, environment.machine_F]\n for machine in list_of_machines:\n if len(machine.orders_inside_the_machine) == 1:\n if machine.orders_inside_the_machine[0].processing_time_remaining <= 0:\n if global_settings.show_machine_output == True:\n print(\"Step \" + str(global_settings.current_time) + \": \" + str(\n machine.name) + \": order finished. \" +\n \"orderID: \" + str(machine.orders_inside_the_machine[0].orderID) +\n \" || product type: \" + str(machine.orders_inside_the_machine[0].product_type))\n\n machine.orders_inside_the_machine[0].finished_production_date = global_settings.current_time\n environment.finished_goods_inventory.append(\n machine.orders_inside_the_machine.pop(0))\n\n ##################### Step 2: we move orders from WIPs into the machines\n # Each origin belongs to one destination.\n # The first item in destinations belongs to the first item in origins and so on.\n # The order movements shown in Step 2 do not depend on the order's product type,\n # instead they depend on the machine scheduling policy.\n # In this version, only a first come, first serve policy is implemented.\n list_of_destinations = environment.list_of_all_machines\n list_of_origins = environment.list_of_all_wip_elements\n wip_names = [\"wip_A\", \"wip_B\", \"wip_C\", \"wip_D\", \"wip_E\", \"wip_F\"]\n\n for machine in list_of_destinations:\n if global_settings.scheduling_policy == \"first_come_first_serve\" and \\\n len(machine.orders_inside_the_machine) == 0 and \\\n len(list_of_origins[list_of_destinations.index(machine)]) > 0:\n\n ############ debugging info ############\n if global_settings.show_movements_from_wip_to_machine == True:\n print(\"Step \" + str(\n global_settings.current_time) + \": Order moved from \" +\n wip_names[list_of_destinations.index(machine)] + \" to \" + str(\n machine.name) + \". Orders in \" +\n wip_names[list_of_destinations.index(machine)] + \": \" + str(\n len(list_of_origins[list_of_destinations.index(machine)])))\n ########################\n machine.orders_inside_the_machine.append(list_of_origins[list_of_destinations.index(machine)].pop(0))\n environment.set_new_random_processing_time(machine) # set a new random processing time for the next order\n # if machine.name == \"Machine A\":\n # with open('m1.csv', mode='a') as m1_CSV:\n # results_writer = csv.writer(m1_CSV, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # results_writer.writerow([environment.machine_A.processing_time])\n\n machine.orders_inside_the_machine[0].processing_time_remaining = machine.processing_time\n machine.orders_inside_the_machine[0].arrival_times_m1m2m3.append(global_settings.current_time)\n\n ##################### Step 3: move orders from FGI to shipped when order due date is reached\n if global_settings.current_time % global_settings.duration_of_one_period == 0:\n ship_orders()\n\n return",
"def test_dont_merge_if_multiple_client(self):\r\n raise SkipTest(\"Not implemented\")",
"def recreate_all_sghops (nffg):\n sg_map = NFFGToolBox.get_all_sghop_info(nffg)\n for sg_hop_id, data in sg_map.iteritems():\n src, dst, flowclass, bandwidth, delay = data\n if not (src and dst):\n continue\n if not nffg.network.has_edge(src.node.id, dst.node.id, key=sg_hop_id):\n nffg.add_sglink(src, dst, id=sg_hop_id, flowclass=flowclass,\n bandwidth=bandwidth, delay=delay)\n # causes unnecesary failures, when bandwidth or delay is missing\n # somewhere\n # else:\n # sg_hop = nffg.network[src.node.id][dst.node.id][sg_hop_id]\n # NFFGToolBox._check_flow_consistencity(sg_map, sg_hop)\n return nffg",
"def test_lo_interface_tc2_add_duplicate(duthost):\n json_patch = [\n {\n \"op\": \"add\",\n \"path\": \"/LOOPBACK_INTERFACE/Loopback0|10.1.0.32~132\",\n \"value\": {}\n },\n {\n \"op\": \"add\",\n \"path\": \"/LOOPBACK_INTERFACE/Loopback0|FC00:1::32~1128\",\n \"value\": {}\n }\n ]\n\n tmpfile = generate_tmpfile(duthost)\n logger.info(\"tmpfile {}\".format(tmpfile))\n\n try:\n output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile)\n expect_op_success(duthost, output)\n\n check_show_ip_intf(duthost, \"Loopback0\", [\"10.1.0.32/32\"], [], is_ipv4=True)\n check_show_ip_intf(duthost, \"Loopback0\", [\"fc00:1::32/128\"], [], is_ipv4=False)\n finally:\n delete_tmpfile(duthost, tmpfile)",
"def test_fts_non_recoverable_failures_handled_on_multihop(vo, did_factory, root_account, replica_client, caches_mock, metrics_mock):\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n # Register a did which doesn't exist. It will trigger an non-recoverable error during the FTS transfer.\n did = did_factory.random_file_did()\n replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])\n\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, **did)\n assert 'Unused hop in multi-hop' in request['err_msg']\n assert request['state'] == RequestState.FAILED\n request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n assert request['state'] == RequestState.FAILED\n assert request['attributes']['source_replica_expression'] == src_rse\n\n # Each hop is a separate transfer, which will be handled by the poller and marked as failed\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 2\n\n # Finisher will handle transfers of the same multihop one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n # The intermediate request must not be re-scheduled by finisher\n with pytest.raises(RequestNotFound):\n request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # ensure tha the ranking was correctly decreased for the whole path\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1\n assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == -1\n assert request['state'] == RequestState.QUEUED",
"def _get_next_hop_unchanged(self):\n return self.__next_hop_unchanged",
"def _set_next_hop_unchanged(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"next-hop-unchanged\", rest_name=\"next-hop-unchanged\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Next hop unchanged', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"next_hop_unchanged must be of a type compatible with empty\"\"\",\n 'defined-type': \"empty\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"next-hop-unchanged\", rest_name=\"next-hop-unchanged\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Next hop unchanged', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)\"\"\",\n })\n\n self.__next_hop_unchanged = t\n if hasattr(self, '_set'):\n self._set()",
"def overwrite_on_tape_topology(rse_factory, did_factory, root_account, vo, file_factory):\n\n rse1 = 'XRD1'\n rse1_id = rse_core.get_rse_id(rse=rse1, vo=vo)\n rse2 = 'XRD3'\n rse2_id = rse_core.get_rse_id(rse=rse2, vo=vo)\n rse3 = 'XRD4'\n rse3_id = rse_core.get_rse_id(rse=rse3, vo=vo)\n\n def __generate_and_upload_file(src_rse, dst_rse, simulate_dst_corrupted=False):\n \"\"\"\n Create and upload real files to source and destination. Don't register it on destination. This way, fts will fail if overwrite = False\n\n If simulate_dst_corrupted is True, will upload a different file to destination, to simulate that it is corrupted\n \"\"\"\n local_file = file_factory.file_generator()\n did = did_factory.random_file_did()\n did_factory.upload_test_file(src_rse, path=local_file, **did)\n did_factory.upload_client.upload(\n [\n {\n 'path': file_factory.file_generator(size=3) if simulate_dst_corrupted else local_file,\n 'rse': dst_rse,\n 'did_scope': did['scope'].external,\n 'did_name': did['name'],\n 'no_register': True,\n }\n ]\n )\n return did\n\n def __create_dids(did1_corrupted=True, did2_corrupted=True):\n \"\"\"\n Uploads two files:\n - one which requires multiple transfer hop to go to destination\n - one which can be transferred in one hop to destination rse\n \"\"\"\n # multihop transfer:\n did1 = __generate_and_upload_file(rse1, rse3, simulate_dst_corrupted=did1_corrupted)\n # direct transfer\n did2 = __generate_and_upload_file(rse2, rse3, simulate_dst_corrupted=did2_corrupted)\n rule_core.add_rule(dids=[did1, did2], account=root_account, copies=1, rse_expression=rse3, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n\n return rse1_id, rse2_id, rse3_id, did1, did2\n\n # Fake that destination RSE is a tape\n rse_core.update_rse(rse_id=rse3_id, parameters={'rse_type': RSEType.TAPE})\n try:\n rse_core.add_rse_attribute(rse3_id, 'archive_timeout', 60)\n yield __create_dids\n finally:\n rse_core.update_rse(rse_id=rse3_id, parameters={'rse_type': RSEType.DISK})\n rse_core.del_rse_attribute(rse3_id, 'archive_timeout')",
"async def test_prevent_out_of_order_txs(self):\n\n tx1 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx1 = decode_transaction(tx1)\n stx1 = sign_transaction(tx1, FAUCET_PRIVATE_KEY)\n tx2 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10, dtx1.nonce + 1)\n stx2 = sign_transaction(tx2, FAUCET_PRIVATE_KEY)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 400, resp.body)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx1})\n self.assertEqual(resp.code, 200, resp.body)\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 200, resp.body)",
"def ovp_is_tripped(self):\n self._raise_not_implemented()",
"def hopping(h,name=\"HOPPING.OUT\",reps=0):\n if h.has_eh: raise\n if h.has_spin: (ii,jj,ts) = extract.hopping_spinful(h.intra)\n else: (ii,jj,ts) = extract.hopping_spinless(h.intra)\n f = open(name,\"w\") # write file\n for (i,j,t) in zip(ii,jj,ts):\n f.write(str(h.geometry.r[i][0])+\" \")\n f.write(str(h.geometry.r[i][1])+\" \")\n f.write(str(h.geometry.r[j][0])+\" \")\n f.write(str(h.geometry.r[j][1])+\" \")\n f.write(str(t)+\"\\n\")\n f.close()",
"def test_bookkeeping():\n\n ## CASE 1: alanine dipeptide in vacuum\n # Create vanilla system\n ala = AlanineDipeptideVacuum()\n system = ala.system\n positions = ala.positions\n\n # Create REST system\n system.removeForce(4)\n res1 = list(ala.topology.residues())[1]\n rest_atoms = [atom.index for atom in res1.atoms()]\n factory = RESTTopologyFactory(system, solute_region=rest_atoms)\n REST_system = factory.REST_system\n\n # Compare energy components\n compare_energy_components(REST_system, system, positions)\n\n ## CASE 2: alanine dipeptide in solvent\n # Create vanilla system\n ala = AlanineDipeptideExplicit()\n system = ala.system\n positions = ala.positions\n\n # Create REST system\n system.removeForce(4)\n res1 = list(ala.topology.residues())[1]\n rest_atoms = [atom.index for atom in res1.atoms()]\n factory = RESTTopologyFactory(system, solute_region=rest_atoms, use_dispersion_correction=True)\n REST_system = factory.REST_system\n\n # Compare energy components\n compare_energy_components(REST_system, system, positions)\n\n ## CASE 3: alanine dipeptide in solvent with repartitioned hybrid system\n # Create repartitioned hybrid system for lambda 0 endstate\n atp, system_generator = generate_atp(phase='solvent')\n htf = generate_dipeptide_top_pos_sys(atp.topology,\n new_res='THR',\n system=atp.system,\n positions=atp.positions,\n system_generator=system_generator,\n conduct_htf_prop=True,\n generate_repartitioned_hybrid_topology_factory=True,\n endstate=0,\n validate_endstate_energy=False)\n\n # Create REST-ified hybrid system\n res1 = list(htf.hybrid_topology.residues)[1]\n rest_atoms = [atom.index for atom in list(res1.atoms)]\n factory = RESTTopologyFactory(htf.hybrid_system, solute_region=rest_atoms, use_dispersion_correction=True)\n REST_system = factory.REST_system\n\n # Compare energy components\n compare_energy_components(REST_system, htf.hybrid_system, htf.hybrid_positions)",
"def test_hup_intermediate(self):\n process_tree = self.get_process_tree()\n initial_zygote = self.get_zygote(process_tree)\n\n # this should cause the intermediate to die, since it should not have a\n # SIGHUP handler\n os.kill(initial_zygote, signal.SIGHUP)\n time.sleep(1)\n\n process_tree = self.get_process_tree()\n final_zygote = self.get_zygote(process_tree)\n assert_not_equal(initial_zygote, final_zygote)",
"def transfer_missing_elements(target_dict, source_dict, transfer_type=None):\r\n\r\n if transfer_type is None:\r\n transfer_type = source_dict.get(\"_transfer_type_\", \"recursive\")\r\n\r\n for key_, val_ in source_dict.items():\r\n # print(key_,isinstance(val_, dict), val_)\r\n if isinstance(val_, dict):\r\n if key_ not in target_dict:\r\n target_dict[key_] = EasyDict()\r\n if transfer_type is None:\r\n transfer_type = val_.get(\"_transfer_type_\", \"recursive\")\r\n # print(\"*********** \",transfer_type)\r\n\r\n if transfer_type == \"recursive\":\r\n transfer_missing_elements(target_dict[key_], val_, transfer_type)\r\n elif transfer_type == \"update\":\r\n target_dict[key_].update(val_)\r\n elif transfer_type == \"overwrite\":\r\n target_dict[key_] = copy.deepcopy(source_dict[key_])\r\n # target_dict[key_] = val_\r\n\r\n elif key_ not in target_dict:\r\n target_dict[key_] = copy.deepcopy(source_dict[key_])\r\n # target_dict[key_] = val_\r\n # else :\r\n # target_dict[key_] = val_\r\n # target_dict[key_] = copy.deepcopy(source_dict[key_])\r\n\r\n\r\n # if isinstance(source_dict[key_],list) and isinstance(source_dict[key_][0],dict):\r\n # if key_ not in target_dict:\r\n # target_dict[key_] = []\r\n # for src_ in source_dict[key_]:\r\n # if not isinstance(src_,dict):\r\n # continue\r\n # match = False\r\n # for tar_ in target_dict[key_]:\r\n # # TODO make a list of bool with ID keys loaded from odb and check if any(matches):\r\n # if key_matches(\"pth_full\", src_, tar_) or key_matches(\"pth_alias\", src_, tar_) :\r\n # match = True\r\n # if not match:\r\n # temp = EasyDict()\r\n # target_dict[key_].append(temp)\r\n # transfer_missing_elements(temp, src_)\r",
"def test_invalid_overprovision_value(self):\n command_line = [\"pool\", \"overprovision\", \"thispool\", \"1.2\"]\n for prefix in [[], [\"-propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def _perturbInPlaceHard(self):\n die",
"def move_orders_job_shop_1_machine():\n ##################### Step 1: empty the machines that have finished production in the previous step\n # The routing here doesn't contain the first production step, since the routing to that step\n # takes place in the order release process\n\n # Move order from machine to fgi, if processing_time_remaining of order is 0\n if len(environment.machine_A.orders_inside_the_machine) == 1:\n order = environment.machine_A.orders_inside_the_machine[0]\n if order.processing_time_remaining <= 0:\n environment.finished_goods_inventory.append(environment.machine_A.orders_inside_the_machine.pop(0))\n order.current_production_step += 1\n ##################### Step 2: move orders from WIP-A into machine A\n if len(environment.machine_A.orders_inside_the_machine) == 0 and len(environment.wip_A) > 0:\n environment.machine_A.orders_inside_the_machine.append(environment.wip_A.pop(0))\n environment.set_new_random_processing_time(environment.machine_A) # set a new random processing time for the next order\n environment.machine_A.orders_inside_the_machine[0].processing_time_remaining = environment.machine_A.processing_time\n environment.machine_A.orders_inside_the_machine[0].arrival_times_m1m2m3.append(global_settings.current_time)\n ##################### Step 3: move orders from FGI to shipped when order due date is reached\n # Move orders from FGI to shipped_orders once they have reached their due_date\n if global_settings.current_time % global_settings.duration_of_one_period == 0:\n ship_orders()\n return",
"def _checksubrepostate(pushop):\n for n in pushop.outgoing.missing:\n ctx = pushop.repo[n]\n\n if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():\n for subpath in sorted(ctx.substate):\n sub = ctx.sub(subpath)\n sub.verify(onpush=True)",
"def test_applying_vim_strategy_already_exists_and_is_broken(self):\n\n # first api query is what already exists\n # remainder are during the loop\n self.vim_client.get_strategy.side_effect = [\n STRATEGY_APPLY_FAILED,\n ]\n\n # invoke the strategy state operation on the orch thread\n self.worker.perform_state_action(self.strategy_step)\n\n # apply API call should never be invoked\n self.vim_client.apply_strategy.assert_not_called()\n\n # Failure case\n self.assert_step_updated(self.strategy_step.subcloud_id,\n consts.STRATEGY_STATE_FAILED)",
"def move_orders_job_shop():\n # First: Move order from order_pool to the respective WIP\n # Second: route products as shown below\n # P1: M1-M2-M3\n # P2: M1-M3-M2\n # P3: M2-M1-M3\n # P4: M2-M3-M1\n # P5: M3-M1-M2\n # P6: M3-M2-M1\n # Third: after production is done, move order to FGI\n\n ##################### Step 1: empty the machines that have finished production in the previous step\n # The routing here doesn't contain the first production step, since the routing to that step\n # takes place in the order release process\n list_of_product_types = [1, 2, 3, 4, 5, 6]\n list_of_destinations = [\n [environment.wip_B, environment.wip_C, environment.finished_goods_inventory],\n [environment.wip_C, environment.wip_B, environment.finished_goods_inventory],\n [environment.wip_A, environment.wip_C, environment.finished_goods_inventory],\n [environment.wip_C, environment.wip_A, environment.finished_goods_inventory],\n [environment.wip_A, environment.wip_B, environment.finished_goods_inventory],\n [environment.wip_B, environment.wip_A, environment.finished_goods_inventory]\n ]\n # Move order from machine to the next wip, if processing_time_remaining of order is 0\n for machine_element in environment.list_of_all_machines:\n if len(machine_element.orders_inside_the_machine) == 1:\n order = machine_element.orders_inside_the_machine[0]\n if order.processing_time_remaining <= 0:\n destination = \\\n list_of_destinations[list_of_product_types.index(order.product_type)][order.current_production_step]\n # print(\"destination \" + str(len(destination)) + \" | machine \" + str(len(machine_element.orders_inside_the_machine)))\n destination.append(machine_element.orders_inside_the_machine.pop(0))\n # print(\"destination \" + str(len(destination)) + \" | machine \" + str(len(machine_element.orders_inside_the_machine)))\n ##### example case product type 1, step 0:\n # von destinations nehme list item 0 (prodtype)\n # von list item 0 nehme list item 0 (prodstep)\n # füge da die order ein\n order.current_production_step += 1\n\n ##################### Step 2: move orders from WIPs into the machines\n # Each origin belongs to one destination.\n # The first item in destinations belongs to the first item in origins and so on.\n # The order movements shown in Step 2 do not depend on the order's product type,\n # instead they depend on the machine scheduling policy.\n # In this version, only a first come, first serve policy is implemented.\n list_of_destinations = environment.list_of_all_machines\n list_of_origins = environment.list_of_all_wip_elements\n wip_names = [\"wip_A\", \"wip_B\", \"wip_C\", \"wip_D\", \"wip_E\", \"wip_F\"]\n\n for machine in list_of_destinations:\n if global_settings.scheduling_policy == \"first_come_first_serve\" and \\\n len(machine.orders_inside_the_machine) == 0 and \\\n len(list_of_origins[list_of_destinations.index(machine)]) > 0:\n\n ############ debugging info ############\n if global_settings.show_movements_from_wip_to_machine == True:\n print(\"Step \" + str(\n global_settings.current_time) + \": Order moved from \" +\n wip_names[list_of_destinations.index(machine)] + \" to \" + str(\n machine.name) + \". 
Orders in \" +\n wip_names[list_of_destinations.index(machine)] + \": \" + str(\n len(list_of_origins[list_of_destinations.index(machine)])))\n ########################\n machine.orders_inside_the_machine.append(list_of_origins[list_of_destinations.index(machine)].pop(0))\n environment.set_new_random_processing_time(machine) # set a new random processing time for the next order\n machine.orders_inside_the_machine[0].processing_time_remaining = machine.processing_time\n machine.orders_inside_the_machine[0].arrival_times_m1m2m3.append(global_settings.current_time)\n\n ##################### Step 3: move orders from FGI to shipped when order due date is reached\n # Move orders from FGI to shipped_orders once they have reached their due_date\n if global_settings.current_time % global_settings.duration_of_one_period == 0:\n ship_orders()\n return",
"def test_04_verify_upgraded_ipv6_network_redundant(self):\n\n self.createIpv4NetworkOffering(True)\n self.createIpv6NetworkOfferingForUpdate(True)\n self.createTinyServiceOffering()\n self.prepareRoutingTestResourcesInBackground()\n self.deployNetwork()\n self.deployNetworkVm()\n self.updateNetworkWithOffering()\n self.checkIpv6Network()\n self.checkIpv6NetworkVm()\n self.restartNetworkWithCleanup()\n self.checkIpv6Network()\n self.checkIpv6NetworkRouting()\n self.checkIpv6FirewallRule()\n self.checkNetworkVRRedundancy()",
"def _process_ue_ho_response(self, job: HandOverResponse, overhead: str):\n ue_id = job.ue_id\n prev_ap_id = job.ap_from\n new_ap_id = job.ap_to\n response = job.response\n logging.info(overhead + '%s--->%s: handover to %s response: %s' % (ue_id, self.ap_id, new_ap_id, response))\n assert prev_ap_id == self.ap_id\n assert self.ue_path.pop(ue_id) == self.UE_HANDOVER_TO\n assert self.ue_to_ho_to.pop(ue_id) == new_ap_id\n if not response:\n self.ue_path[ue_id] = self.UE_CONNECTED\n else:\n self._send_connected_ue_list()",
"def undo(self):\n LOG.debug(\"In the undo method, will attempt to restore\")\n\n # validate detected nothing to do for this, nothing was done\n # for execute, so simply return\n if self.no_op:\n return\n\n if not self.source_dev or not self.target_dev:\n return\n LOG.debug(\"The source dictionary is: %s\", self.source_dict_restore)\n LOG.debug(\"The target dictionary is: %s\", self.target_dict_restore)\n\n # In scenario where no source IP Address...\n if self.source_dict_restore:\n self.commandex.send_ifcfg(self.source_dev,\n self.source_dict_restore)\n\n # May have failed because the ifcfg didn't even exist, nothing\n # to roll back then\n if self.target_dict_restore:\n self.commandex.send_ifcfg(self.target_dev,\n self.target_dict_restore)",
"def test_multihop_receiver_on_failure(vo, did_factory, replica_client, root_account, caches_mock, metrics_mock):\n receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'all_vos': True, 'total_threads': 1})\n receiver_thread.start()\n\n try:\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n # Register a did which doesn't exist. It will trigger a failure error during the FTS transfer.\n did = did_factory.random_file_did()\n replica_client.add_replicas(rse=src_rse, files=[{'scope': did['scope'].external, 'name': did['name'], 'bytes': 1, 'adler32': 'aaaaaaaa'}])\n\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=jump_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.FAILED\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.FAILED\n assert 'Unused hop in multi-hop' in request['err_msg']\n\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 1\n\n # Finisher will handle transfers of the same multihop one hop at a time\n finisher(once=True, partition_wait_time=0)\n finisher(once=True, partition_wait_time=0)\n # The intermediate request must not be re-scheduled by finisher\n with pytest.raises(RequestNotFound):\n request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # ensure tha the ranking was correctly decreased for the whole path\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == -1\n assert __get_source(request_id=request['id'], src_rse_id=src_rse_id, **did).ranking == -1\n assert request['state'] == RequestState.QUEUED\n finally:\n receiver_graceful_stop.set()\n receiver_thread.join(timeout=5)\n receiver_graceful_stop.clear()",
"def shush(self):\n cancel_all()"
]
| [
"0.558939",
"0.5548721",
"0.55455595",
"0.524568",
"0.5092852",
"0.5076279",
"0.50690395",
"0.5054944",
"0.5048982",
"0.50388175",
"0.5008279",
"0.4969457",
"0.49644142",
"0.49466103",
"0.49013048",
"0.48496333",
"0.4841354",
"0.48402858",
"0.48364288",
"0.4832221",
"0.48088387",
"0.479317",
"0.47884613",
"0.4785885",
"0.47612533",
"0.47593904",
"0.4756332",
"0.4754522",
"0.47482675",
"0.47440815"
]
| 0.62804127 | 0 |
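The row above closes with the 30 per-negative scores and two trailing values that look like the positive document's own score and rank. Below is a minimal sketch of how such a row could feed the triplet objective declared in its metadata; the key names (query, document, negatives, negative_scores) and the one-JSON-object-per-line layout are assumptions about how these records are serialized, not something this dump defines.

# Minimal sketch: pair each negative with its score and keep the hardest ones
# for (query, positive, negative) triplets. All field names are assumed.
import json

def hardest_triplets(record, k=5):
    query = record["query"]          # natural-language description (assumed key)
    positive = record["document"]    # matching code snippet (assumed key)
    negatives = record["negatives"]  # candidate snippets (assumed key)
    scores = [float(s) for s in record["negative_scores"]]  # stored as strings above

    # Higher score appears to mean "more similar to the query", i.e. a harder negative.
    ranked = sorted(zip(negatives, scores), key=lambda ns: ns[1], reverse=True)
    return [(query, positive, neg) for neg, _ in ranked[:k]]

# Usage, assuming one JSON object per line:
# with open("records.jsonl") as fh:
#     for line in fh:
#         print(len(hardest_triplets(json.loads(line))))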
Test that submitter and poller call fts with correct certificates in multivo env | def test_multi_vo_certificates(file_config_mock, rse_factory, did_factory, scope_factory, vo, second_vo):
_, [scope1, scope2] = scope_factory(vos=[vo, second_vo])
def __init_test_for_vo(vo, scope):
src_rse, src_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default', vo=vo)
dst_rse, dst_rse_id = rse_factory.make_rse(scheme='mock', protocol_impl='rucio.rse.protocols.posix.Default', vo=vo)
all_rses = [src_rse_id, dst_rse_id]
for rse_id in all_rses:
rse_core.add_rse_attribute(rse_id, 'fts', TEST_FTS_HOST)
distance_core.add_distance(src_rse_id, dst_rse_id, distance=10)
account = InternalAccount('root', vo=vo)
did = did_factory.random_file_did(scope=scope)
replica_core.add_replica(rse_id=src_rse_id, scope=scope, name=did['name'], bytes_=1, account=account, adler32=None, md5=None)
rule_core.add_rule(dids=[did], account=account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None,
lifetime=None, locked=False, subscription_id=None, ignore_account_limit=True)
return all_rses
all_rses = []
rses = __init_test_for_vo(vo=vo, scope=scope1)
all_rses.extend(rses)
rses = __init_test_for_vo(vo=second_vo, scope=scope2)
all_rses.extend(rses)
certs_used_by_submitter = []
certs_used_by_poller = []
class _FTSWrapper(FTS3Transfertool):
        # Override the FTS3 transfertool: don't perform any real interaction with FTS, just record the certificates used

def submit(self, transfers, job_params, timeout=None):
certs_used_by_submitter.append(self.cert[0])
return generate_uuid()
def bulk_query(self, requests_by_eid, timeout=None):
certs_used_by_poller.append(self.cert[0])
return {}
with patch('rucio.core.transfer.TRANSFERTOOL_CLASSES_BY_NAME', new={'fts3': _FTSWrapper}):
submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)
assert sorted(certs_used_by_submitter) == ['DEFAULT_DUMMY_CERT', 'NEW_VO_DUMMY_CERT']
with patch('rucio.daemons.conveyor.poller.FTS3Transfertool', _FTSWrapper):
poller(once=True, older_than=0, partition_wait_time=0)
assert sorted(certs_used_by_poller) == ['DEFAULT_DUMMY_CERT', 'NEW_VO_DUMMY_CERT'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_01_basics(self):\n\n self.ac.start()\n\n arg_dict = {\n \"cert\": os.path.join(self.cs_dir, self.get_cli_cert(\"src\")),\n \"key\": os.path.join(self.keys_dir, self.get_cli_key(\"src\")),\n \"dst\": self.durl,\n \"dcert\": os.path.join(self.cs_dir, self.get_cli_cert(\"dst\")),\n \"dkey\": os.path.join(self.keys_dir, self.get_cli_key(\"dst\")),\n \"pkg\": \"[email protected],5.11-0\",\n \"empty\": os.path.join(self.test_root, \"tmp/empty\"),\n \"noexist\": os.path.join(self.test_root, \"octopus\"),\n \"verboten\": self.verboten,\n }\n\n # We need an image for seed_ta_dir() to work.\n # TODO: there might be a cleaner way of doing this\n self.image_create()\n # Add the trust anchor needed to verify the server's identity.\n self.seed_ta_dir(\"ta7\")\n\n # We try to receive a pkg from a secured repo and publish it to\n # another secured repo where both repos require different\n # credentials\n self.pkgrecv(self.surl, \"--key {key} --cert {cert} \"\n \"-d {dst} --dkey {dkey} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict))\n\n # Now try to use the same credentials for source and dest.\n # This should fail.\n self.pkgrecv(self.surl, \"--key {key} --cert {cert} \"\n \"-d {dst} --dkey {key} --dcert {cert} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Make sure we don't traceback when credential files are invalid\n # Src certificate option missing\n self.pkgrecv(self.surl, \"--key {key} -d {dst} \"\n \"--dkey {dkey} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Dst certificate option missing\n self.pkgrecv(self.surl, \"--key {key} --cert {cert} \"\n \"-d {dst} --dkey {dkey} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Src key option missing\n self.pkgrecv(self.surl, \"--cert {cert} \"\n \"-d {dst} --dkey {dkey} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Dst key option missing\n self.pkgrecv(self.surl, \"--key {key} --cert {cert} \"\n \"-d {dst} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Src certificate not found\n self.pkgrecv(self.surl, \"--key {key} --cert {noexist} \"\n \"-d {dst} --dkey {dkey} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Dst certificate not found\n self.pkgrecv(self.surl, \"--key {key} --cert {cert} \"\n \"-d {dst} --dkey {dkey} --dcert {noexist} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Src key not found\n self.pkgrecv(self.surl, \"--key {noexist} --cert {cert} \"\n \"-d {dst} --dkey {dkey} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Dst key not found\n self.pkgrecv(self.surl, \"--key {key} --cert {cert} \"\n \"-d {dst} --dkey {noexist} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Src certificate is empty file\n self.pkgrecv(self.surl, \"--key {key} --cert {empty} \"\n \"-d {dst} --dkey {dkey} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Dst certificate is empty file\n self.pkgrecv(self.surl, \"--key {key} --cert {cert} \"\n \"-d {dst} --dkey {dkey} --dcert {empty} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Src key is empty file\n self.pkgrecv(self.surl, \"--key {empty} --cert {cert} \"\n \"-d {dst} --dkey {dkey} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n\n # Dst key is empty file\n self.pkgrecv(self.surl, \"--key {key} --cert {cert} \"\n \"-d {dst} --dkey {empty} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict), exit=1)\n \n # No permissions to read src certificate \n self.pkgrecv(self.surl, \"--key {key} --cert {verboten} \"\n \"-d {dst} --dkey {dkey} --dcert {dcert} \"\n 
\"{pkg}\".format(**arg_dict), su_wrap=True, exit=1)\n\n # No permissions to read dst certificate \n self.pkgrecv(self.surl, \"--key {key} --cert {cert} \"\n \"-d {dst} --dkey {dkey} --dcert {verboten} \"\n \"{pkg}\".format(**arg_dict), su_wrap=True, exit=1)\n\n # No permissions to read src key \n self.pkgrecv(self.surl, \"--key {verboten} --cert {cert} \"\n \"-d {dst} --dkey {dkey} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict), su_wrap=True, exit=1)\n\n # No permissions to read dst key \n self.pkgrecv(self.surl, \"--key {key} --cert {cert} \"\n \"-d {dst} --dkey {verboten} --dcert {dcert} \"\n \"{pkg}\".format(**arg_dict), su_wrap=True, exit=1)",
"def _worker(self, results):\n keys = {\n \"test-certificate-verify\": {\n \"MD5 forced\": 2,\n \"TLSv1.1 signature in TLSv1.2 Certificate Verify\": 1,\n \"MITIGATION\": \"SLOTH\",\n },\n \"test-sig-algs\": {\"MD5 first\": 2, \"MITIGATION\": \"SLOTH\"},\n \"test-clienthello-md5\": {\n \"only-md5-rsa-signature_algorithm\": 1,\n \"unknown-signature_algorithm-numbers\": 1,\n \"MITIGATION\": \"SLOTH\",\n },\n \"test-tls13-pkcs-signature\": {\n \"rsa_pkcs1_md5 signature\": 1,\n \"MITIGATION\": \"SLOTH_MD5_SIGNATURE_TLS_1_3\",\n },\n }\n return self._obtain_results(results, keys)",
"def test_cert(self):\n\n try:\n client = SSLClient(host=FQDN, ip=APPLIANCE, usercert=CERT, sslverifyhost=True, cabundle=CABUNDLE)\n self.assertTrue(1==1, \"SSLClient connects with cabundle\")\n except Exception as exception:\n print(exception)\n self.fail(\"SSLClient did not connect\")\n \n response = client.send_command('LIST')\n self.assertEqual(response.ret, 100)\n\n client.disconnect()",
"def test_vcr():\n http_manager = urllib3.PoolManager()\n response = http_manager.request(\n \"GET\", \"https://developer.xero.com/documentation/oauth2/auth-flow\"\n )\n assert response.status == 200\n assert \"Xero is a multi-tenanted platform.\" in response.data.decode(\"utf-8\")",
"def test_unix_client_account_verification(core_session, agent_enrolled_unix_system_with_users, proxy_start_stop):\n\n \"\"\"\n Testrail Link:\n https://testrail.centrify.com/index.php?/cases/view/1293456\n https://testrail.centrify.com/index.php?/cases/view/1293457\n https://testrail.centrify.com/index.php?/cases/view/1293458\n \"\"\"\n\n # verfiy the test is run with single thread.\n assert 'PYTEST_XDIST_WORKER_COUNT' not in os.environ, \\\n f'This test cannot be run with multiple threads due to starting and stopping connectors'\n\n enrolledsystems = agent_enrolled_unix_system_with_users\n accounts = enrolledsystems[0][\"Accounts\"]\n proxyid = enrolledsystems[0][\"ProxyId\"]\n session = enrolledsystems[0][\"Session\"]\n proxycontrol = proxy_start_stop\n\n logger.info(\"stop the agent\")\n ssh_manager.ssh_stop_agent(session)\n logger.info(\"start the connector\")\n proxycontrol(proxyid, True)\n\n logger.info(\"Verifying accounts, Connector is available\")\n for i, val in enumerate(accounts):\n logger.info(str(i) + \", Name: \" + val[\"Name\"])\n verify_pass_result, verify_pass_success = ResourceManager.check_account_health(core_session, val[\"Id\"])\n assert verify_pass_result == 'OK', \"Verify Failed on Account: \" + val['Name'] + \", \" + verify_pass_result\n\n # stop Conector , Should fail\n logger.info(\"Stopping the connector\")\n proxycontrol(proxyid, False)\n logger.info(\"Verifying accounts, no agent or connector\")\n for i, val in enumerate(accounts):\n verify_pass_result, verify_pass_success = ResourceManager.check_account_health(core_session, val[\"Id\"])\n assert verify_pass_result != 'OK', \"Verify success on Account: \" + val['Name'] + \", \" + verify_pass_result\n\n # Start agent\n logger.info(\"Starting the agent\")\n ssh_manager.ssh_start_agent(session, True)\n\n logger.info(\"Verifying accounts, agent is available.\")\n for i, val in enumerate(accounts):\n verify_pass_result, verify_pass_success = ResourceManager.check_account_health(core_session, val[\"Id\"])\n assert verify_pass_result == 'OK', \"Verify failed on Account: \" + val['Name'] + \", \" + verify_pass_result\n\n # verify account again, both connector and agent are running\n proxycontrol(proxyid, True)\n logger.info(\"Verifying accounts, both agent and connector are available\")\n for i, val in enumerate(accounts):\n verify_pass_result, verify_pass_success = ResourceManager.check_account_health(core_session, val[\"Id\"])\n assert verify_pass_result == 'OK', \"Verify Failed on Account: \" + val['Name'] + \", \" + verify_pass_result",
"def test_successful_program_certificate_generation_with_electives(self):\n run_2 = CourseRunFactory.create(\n freeze_grade_date=now_in_utc() - timedelta(days=1),\n course__program=self.program,\n )\n electives_set = ElectivesSet.objects.create(program=self.program, required_number=1)\n\n for run in [self.run_1, run_2]:\n final_grade = FinalGradeFactory.create(\n user=self.user,\n course_run=run,\n passed=True,\n status='complete',\n grade=0.7\n )\n CourseRunGradingStatus.objects.create(course_run=run, status='complete')\n ElectiveCourse.objects.create(course=run.course, electives_set=electives_set)\n with mute_signals(post_save):\n MicromastersCourseCertificate.objects.create(course=final_grade.course_run.course, user=self.user)\n\n cert_qset = MicromastersProgramCertificate.objects.filter(user=self.user, program=self.program)\n assert cert_qset.exists() is False\n api.generate_program_certificate(self.user, self.program)\n assert cert_qset.exists() is True",
"def main():\n ssl_date_fmt = r'%b %d %H:%M:%S %Y %Z'\n #cert_file_name = os.path.join(os.path.dirname(__file__), \"testcert.pem\")\n\n parser = argparse.ArgumentParser(description='Parse a certificate and show days left')\n parser.add_argument('-v', '--verbose', action='store_true', help='show full certificate')\n parser.add_argument('cert', nargs='+', help='certifcate file(s)')\n args = parser.parse_args()\n for cert_file_name in args.cert:\n try:\n cert_dict = ssl._ssl._test_decode_cert(cert_file_name)\n serial = cert_dict['serialNumber']\n subject = dict(x[0] for x in cert_dict['subject'])\n issued_to = subject['commonName']\n time_left = datetime.datetime.strptime(cert_dict['notAfter'], ssl_date_fmt) - datetime.datetime.utcnow()\n if args.verbose:\n pp(cert_dict)\n ssl_expires_in(issued_to, serial, time_left)\n\n except Exception as error:\n print(\"Error decoding certificate: {:}\".format(error))",
"def verify():",
"def test_subscriber_access_for_two_vsg_services(self):",
"def test_add_trusted_project6(self):\n pass",
"def test_add_trusted_project1(self):\n pass",
"def test_cert_request(self):\n oim = OIM()\n rc, _, _, msg = oim.request('--hostname', 'test.' + DOMAIN)\n self.assertEqual(rc, 0, \"Failed to request certificate\\n%s\" % msg)\n self.assert_(oim.reqid != '', msg)",
"def test_use_certificate(self, ctx_or_conn):\n # TODO\n # Hard to assert anything. But we could set a privatekey then ask\n # OpenSSL if the cert and key agree using check_privatekey. Then as\n # long as check_privatekey works right we're good...\n ctx_or_conn.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )",
"def test_CA_upload_from_all_nodes(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n self.x509.upload_root_certs(server=self.master, root_ca_names=[self.x509.root_ca_names[0]])\n self.x509.upload_root_certs(server=self.servers[:self.nodes_init][1],\n root_ca_names=[self.x509.root_ca_names[1]])\n self.x509.upload_root_certs(server=self.servers[:self.nodes_init][2],\n root_ca_names=self.x509.root_ca_names[2:])\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n self.x509.delete_unused_out_of_the_box_CAs(server=self.master)\n self.x509.upload_client_cert_settings(server=self.master)\n self.auth(servers=self.servers[:self.nodes_init])\n content = self.x509.get_trusted_CAs()\n self.log.info(\"Trusted CAs: {0}\".format(content))\n expected_root_ca_names = self.x509.root_ca_names\n actual_root_ca_names = list()\n for ca_dict in content:\n subject = ca_dict[\"subject\"]\n root_ca_name = subject.split(\"CN=\")[1]\n if \"Couchbase Server\" not in root_ca_name:\n actual_root_ca_names.append(root_ca_name)\n if set(actual_root_ca_names) != set(expected_root_ca_names):\n self.fail(\"Expected {0} Actual {1}\".format(expected_root_ca_names,\n actual_root_ca_names))",
"def test_add_trusted_project7(self):\n pass",
"def test_main_results():\n # Due to complexities testing with arguments to get full coverage\n # run the script externally with full arguments\n os.popen('python3 -m pip install -e .')\n os.popen(\n 'python3 Examples/WSO.py -url cn1234.awtest.com -username citests -password hunter2 -tenantcode shibboleet'\n ).read()\n\n filename = \"uem.json\"\n\n assert AUTH.check_file_exists(filename) is True\n assert AUTH.verify_config(filename, 'authorization',\n AUTH.encode(\"citests\", \"hunter2\")) is True\n assert AUTH.verify_config(filename, 'url', \"cn1234.awtest.com\") is True\n assert AUTH.verify_config(filename, 'aw-tenant-code', \"shibboleet\") is True",
"def test_x509_in_verify_works(self):\n serverContext = Context(SSLv23_METHOD)\n serverContext.use_privatekey(\n load_privatekey(FILETYPE_PEM, root_key_pem)\n )\n serverContext.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n serverConnection = Connection(serverContext, None)\n\n def verify_cb_get_subject(conn, cert, errnum, depth, ok):\n assert cert.get_subject()\n return 1\n\n clientContext = Context(SSLv23_METHOD)\n clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)\n clientConnection = Connection(clientContext, None)\n clientConnection.set_connect_state()\n\n handshake_in_memory(clientConnection, serverConnection)",
"def test_add_trusted_project4(self):\n pass",
"def test_100(self):\n primary_str, equivalent_set = gmn.app.middleware.session_cert.get_authenticated_subjects(\n self.cert_simple_subject_info_pem\n )\n self.assertEqual(\n primary_str,\n 'CN=Roger Dahl A1779,O=Google,C=US,DC=cilogon,DC=org',\n )\n self.assertListEqual(\n sorted(equivalent_set),\n [\n 'CN=Roger Dahl A1779,O=Google,C=US,DC=cilogon,DC=org',\n 'authenticatedUser',\n 'public',\n 'verifiedUser',\n ],\n )",
"def test_create_pod_security_policy_self_subject_review_for_all_namespaces(self):\n pass",
"def test_create_pod_security_policy_subject_review_for_all_namespaces(self):\n pass",
"def test_verifyHostname_matching(self):\n hostname = 'www.google.com'\n url = 'https://' + hostname + '/recaptcha'\n contextFactory = crypto.SSLVerifyingContextFactory(url)\n self.assertEqual(contextFactory.hostname, hostname)\n\n x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,\n self._certificateText)\n conn = DummyEndpoint()\n result = contextFactory.verifyHostname(conn, x509, 0, 0, True)\n self.assertTrue(result)",
"def test_add_trusted_project2(self):\n pass",
"def test_add_trusted_project3(self):\n pass",
"def test_add_trusted_project5(self):\n pass",
"async def test_complex_nft_offer(\n self_hostname: str, two_wallet_nodes: Any, trusted: Any, royalty_pts: Tuple[int, int, int]\n) -> None:\n full_nodes, wallets, _ = two_wallet_nodes\n full_node_api: FullNodeSimulator = full_nodes[0]\n full_node_server = full_node_api.server\n wallet_node_maker, server_0 = wallets[0]\n wallet_node_taker, server_1 = wallets[1]\n wsm_maker = wallet_node_maker.wallet_state_manager\n wsm_taker = wallet_node_taker.wallet_state_manager\n wallet_maker = wsm_maker.main_wallet\n wallet_taker = wsm_taker.main_wallet\n\n ph_maker = await wallet_maker.get_new_puzzlehash()\n ph_taker = await wallet_taker.get_new_puzzlehash()\n ph_token = bytes32(token_bytes())\n if trusted:\n wallet_node_maker.config[\"trusted_peers\"] = {\n full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()\n }\n wallet_node_taker.config[\"trusted_peers\"] = {\n full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()\n }\n else:\n wallet_node_maker.config[\"trusted_peers\"] = {}\n wallet_node_taker.config[\"trusted_peers\"] = {}\n wallet_node_maker.config[\"automatically_add_unknown_cats\"] = True\n wallet_node_taker.config[\"automatically_add_unknown_cats\"] = True\n\n await server_0.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)\n await server_1.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)\n\n # Need money for fees and offering\n for i in range(0, 2):\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_maker))\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_taker))\n blocks_needed = 3\n for i in range(blocks_needed):\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_taker))\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_token))\n await full_node_api.wait_for_wallets_synced(wallet_nodes=[wallet_node_maker, wallet_node_taker], timeout=30)\n\n funds_maker = sum([calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, 3)])\n funds_taker = sum(\n [\n calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))\n for i in range(1, 3 + blocks_needed)\n ]\n )\n\n await time_out_assert(30, wallet_maker.get_unconfirmed_balance, funds_maker)\n await time_out_assert(30, wallet_maker.get_confirmed_balance, funds_maker)\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n CAT_AMOUNT = uint64(100000000)\n async with wsm_maker.lock:\n cat_wallet_maker: CATWallet = await CATWallet.create_new_cat_wallet(\n wsm_maker, wallet_maker, {\"identifier\": \"genesis_by_id\"}, CAT_AMOUNT, DEFAULT_TX_CONFIG\n )\n async with wsm_maker.lock:\n cat_wallet_taker: CATWallet = await CATWallet.create_new_cat_wallet(\n wsm_taker, wallet_taker, {\"identifier\": \"genesis_by_id\"}, CAT_AMOUNT, DEFAULT_TX_CONFIG\n )\n cat_spend_bundle_maker = (\n await wallet_node_maker.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(wallet_maker.id())\n )[0].spend_bundle\n cat_spend_bundle_taker = (\n await wallet_node_taker.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(wallet_taker.id())\n )[0].spend_bundle\n await time_out_assert_not_none(\n 5, full_node_api.full_node.mempool_manager.get_spendbundle, cat_spend_bundle_maker.name()\n )\n await time_out_assert_not_none(\n 5, full_node_api.full_node.mempool_manager.get_spendbundle, 
cat_spend_bundle_taker.name()\n )\n\n # We'll need these later\n basic_nft_wallet_maker = await NFTWallet.create_new_nft_wallet(wsm_maker, wallet_maker, name=\"NFT WALLET MAKER\")\n basic_nft_wallet_taker = await NFTWallet.create_new_nft_wallet(wsm_taker, wallet_taker, name=\"NFT WALLET TAKER\")\n\n did_wallet_maker: DIDWallet = await DIDWallet.create_new_did_wallet(wsm_maker, wallet_maker, uint64(1))\n did_wallet_taker: DIDWallet = await DIDWallet.create_new_did_wallet(wsm_taker, wallet_taker, uint64(1))\n did_spend_bundle_maker = (\n await wallet_node_maker.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(did_wallet_maker.id())\n )[0].spend_bundle\n did_spend_bundle_taker = (\n await wallet_node_taker.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(did_wallet_taker.id())\n )[0].spend_bundle\n\n await time_out_assert_not_none(\n 5, full_node_api.full_node.mempool_manager.get_spendbundle, did_spend_bundle_maker.name()\n )\n await time_out_assert_not_none(\n 5, full_node_api.full_node.mempool_manager.get_spendbundle, did_spend_bundle_taker.name()\n )\n\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_token))\n\n funds_maker = funds_maker - 1 - CAT_AMOUNT\n funds_taker = funds_taker - 1 - CAT_AMOUNT\n\n await time_out_assert(30, wallet_maker.get_unconfirmed_balance, funds_maker)\n await time_out_assert(30, wallet_maker.get_confirmed_balance, funds_maker)\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n await time_out_assert(30, cat_wallet_maker.get_confirmed_balance, CAT_AMOUNT)\n await time_out_assert(30, cat_wallet_maker.get_unconfirmed_balance, CAT_AMOUNT)\n await time_out_assert(30, cat_wallet_taker.get_confirmed_balance, CAT_AMOUNT)\n await time_out_assert(30, cat_wallet_taker.get_unconfirmed_balance, CAT_AMOUNT)\n did_id_maker = bytes32.fromhex(did_wallet_maker.get_my_DID())\n did_id_taker = bytes32.fromhex(did_wallet_taker.get_my_DID())\n target_puzhash_maker = ph_maker\n target_puzhash_taker = ph_taker\n royalty_puzhash_maker = ph_maker\n royalty_puzhash_taker = ph_taker\n royalty_basis_pts_maker, royalty_basis_pts_taker_1, royalty_basis_pts_taker_2 = (\n royalty_pts[0],\n uint16(royalty_pts[1]),\n uint16(royalty_pts[2]),\n )\n\n nft_wallet_maker = await NFTWallet.create_new_nft_wallet(\n wallet_node_maker.wallet_state_manager, wallet_maker, name=\"NFT WALLET DID 1\", did_id=did_id_maker\n )\n nft_wallet_taker = await NFTWallet.create_new_nft_wallet(\n wallet_node_taker.wallet_state_manager, wallet_taker, name=\"NFT WALLET DID 1\", did_id=did_id_taker\n )\n metadata = Program.to(\n [\n (\"u\", [\"https://www.chia.net/img/branding/chia-logo.svg\"]),\n (\"h\", \"0xD4584AD463139FA8C0D9F68F4B59F185\"),\n ]\n )\n if royalty_basis_pts_maker > 65535:\n with pytest.raises(ValueError):\n await nft_wallet_maker.generate_new_nft(\n metadata,\n DEFAULT_TX_CONFIG,\n target_puzhash_maker,\n royalty_puzhash_maker,\n royalty_basis_pts_maker, # type: ignore\n did_id_maker,\n )\n return\n else:\n sb_maker = await nft_wallet_maker.generate_new_nft(\n metadata,\n DEFAULT_TX_CONFIG,\n target_puzhash_maker,\n royalty_puzhash_maker,\n uint16(royalty_basis_pts_maker),\n did_id_maker,\n )\n\n sb_taker_1 = await nft_wallet_taker.generate_new_nft(\n metadata,\n DEFAULT_TX_CONFIG,\n target_puzhash_taker,\n royalty_puzhash_taker,\n royalty_basis_pts_taker_1,\n did_id_taker,\n )\n assert sb_maker is not None\n assert sb_taker_1 is not None\n await 
time_out_assert_not_none(10, full_node_api.full_node.mempool_manager.get_spendbundle, sb_maker.name())\n await time_out_assert_not_none(10, full_node_api.full_node.mempool_manager.get_spendbundle, sb_taker_1.name())\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_token))\n\n funds_maker -= 1\n funds_taker -= 1\n\n await time_out_assert(30, wallet_maker.get_unconfirmed_balance, funds_maker)\n await time_out_assert(30, wallet_maker.get_confirmed_balance, funds_maker)\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n await time_out_assert(30, get_nft_count, 1, nft_wallet_maker)\n await time_out_assert(30, get_nft_count, 1, nft_wallet_taker)\n\n # MAke one more NFT for the taker\n sb_taker_2 = await nft_wallet_taker.generate_new_nft(\n metadata,\n DEFAULT_TX_CONFIG,\n target_puzhash_taker,\n royalty_puzhash_taker,\n royalty_basis_pts_taker_2,\n did_id_taker,\n )\n assert sb_taker_2 is not None\n await time_out_assert_not_none(10, full_node_api.full_node.mempool_manager.get_spendbundle, sb_taker_2.name())\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_token))\n\n funds_taker -= 1\n\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n await time_out_assert(30, get_nft_count, 2, nft_wallet_taker)\n\n trade_manager_maker = wsm_maker.trade_manager\n trade_manager_taker = wsm_taker.trade_manager\n maker_nfts = await nft_wallet_maker.get_current_nfts()\n taker_nfts = await nft_wallet_taker.get_current_nfts()\n nft_to_offer_asset_id_maker: bytes32 = maker_nfts[0].nft_id\n nft_to_offer_asset_id_taker_1: bytes32 = taker_nfts[0].nft_id\n nft_to_offer_asset_id_taker_2: bytes32 = taker_nfts[1].nft_id\n if royalty_basis_pts_maker > 60000:\n XCH_REQUESTED = 20000\n CAT_REQUESTED = 1000\n FEE = uint64(20000)\n else:\n XCH_REQUESTED = 2000000000000\n CAT_REQUESTED = 100000\n FEE = uint64(2000000000000)\n\n complex_nft_offer = {\n nft_to_offer_asset_id_maker: -1,\n cat_wallet_maker.id(): CAT_REQUESTED * -1,\n 1: XCH_REQUESTED,\n nft_to_offer_asset_id_taker_1: 1,\n nft_to_offer_asset_id_taker_2: 1,\n bytes32.from_hexstr(cat_wallet_taker.get_asset_id()): CAT_REQUESTED,\n }\n\n driver_dict = {\n nft_to_offer_asset_id_taker_1: match_puzzle(uncurry_puzzle(taker_nfts[0].full_puzzle)),\n nft_to_offer_asset_id_taker_2: match_puzzle(uncurry_puzzle(taker_nfts[1].full_puzzle)),\n bytes32.from_hexstr(cat_wallet_taker.get_asset_id()): PuzzleInfo(\n {\n \"type\": \"CAT\",\n \"tail\": \"0x\" + cat_wallet_taker.get_asset_id(),\n }\n ),\n }\n\n success, trade_make, error = await trade_manager_maker.create_offer_for_ids(\n complex_nft_offer, DEFAULT_TX_CONFIG, driver_dict=driver_dict, fee=FEE\n )\n assert error is None\n assert success\n assert trade_make is not None\n\n if royalty_basis_pts_maker == 10000:\n with pytest.raises(ValueError):\n trade_take, tx_records = await trade_manager_taker.respond_to_offer(\n Offer.from_bytes(trade_make.offer),\n wallet_node_taker.get_full_node_peer(),\n DEFAULT_TX_CONFIG,\n fee=FEE,\n )\n # all done for this test\n return\n else:\n trade_take, tx_records = await trade_manager_taker.respond_to_offer(\n Offer.from_bytes(trade_make.offer),\n wallet_node_taker.get_full_node_peer(),\n DEFAULT_TX_CONFIG,\n fee=FEE,\n )\n assert trade_take is not None\n assert tx_records is not None\n await 
full_node_api.process_transaction_records(records=tx_records)\n\n # Now let's make sure the final wallet state is correct\n maker_royalty_summary = NFTWallet.royalty_calculation(\n {\n nft_to_offer_asset_id_maker: (royalty_puzhash_maker, uint16(royalty_basis_pts_maker)),\n },\n {\n None: uint64(XCH_REQUESTED),\n bytes32.from_hexstr(cat_wallet_taker.get_asset_id()): uint64(CAT_REQUESTED),\n },\n )\n taker_royalty_summary = NFTWallet.royalty_calculation(\n {\n nft_to_offer_asset_id_taker_1: (royalty_puzhash_taker, royalty_basis_pts_taker_1),\n nft_to_offer_asset_id_taker_2: (royalty_puzhash_taker, royalty_basis_pts_taker_2),\n },\n {\n bytes32.from_hexstr(cat_wallet_maker.get_asset_id()): uint64(CAT_REQUESTED),\n },\n )\n maker_xch_royalties_expected = maker_royalty_summary[nft_to_offer_asset_id_maker][0][\"amount\"]\n maker_cat_royalties_expected = maker_royalty_summary[nft_to_offer_asset_id_maker][1][\"amount\"]\n taker_cat_royalties_expected = (\n taker_royalty_summary[nft_to_offer_asset_id_taker_1][0][\"amount\"]\n + taker_royalty_summary[nft_to_offer_asset_id_taker_2][0][\"amount\"]\n )\n funds_maker = int(funds_maker - FEE + XCH_REQUESTED + maker_xch_royalties_expected)\n funds_taker = int(funds_taker - FEE - XCH_REQUESTED - maker_xch_royalties_expected)\n\n await time_out_assert(30, wallet_maker.get_unconfirmed_balance, funds_maker)\n await time_out_assert(30, wallet_maker.get_confirmed_balance, funds_maker)\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n\n async def get_cat_wallet_and_check_balance(asset_id: str, wsm: Any) -> uint128:\n cat_wallet = await wsm.get_wallet_for_asset_id(asset_id)\n if cat_wallet is None:\n return uint128(0)\n else:\n return uint128(await cat_wallet.get_confirmed_balance())\n\n taker_cat_funds_maker = CAT_REQUESTED + maker_cat_royalties_expected\n maker_cat_funds_taker = CAT_REQUESTED + taker_cat_royalties_expected\n await time_out_assert(\n 30,\n get_cat_wallet_and_check_balance,\n taker_cat_funds_maker,\n cat_wallet_taker.get_asset_id(),\n wsm_maker,\n )\n await time_out_assert(\n 30,\n get_cat_wallet_and_check_balance,\n maker_cat_funds_taker,\n cat_wallet_maker.get_asset_id(),\n wsm_taker,\n )\n maker_nfts = await basic_nft_wallet_maker.get_current_nfts()\n taker_nfts = await basic_nft_wallet_taker.get_current_nfts()\n assert len(maker_nfts) == 2\n assert len(taker_nfts) == 1\n\n assert nft_to_offer_asset_id_maker == taker_nfts[0].nft_id\n assert nft_to_offer_asset_id_taker_1 in [nft.nft_id for nft in maker_nfts]\n assert nft_to_offer_asset_id_taker_2 in [nft.nft_id for nft in maker_nfts]\n\n # Try another permutation\n complex_nft_offer = {\n cat_wallet_maker.id(): CAT_REQUESTED * -1,\n 1: int(XCH_REQUESTED / 2),\n bytes32.from_hexstr(cat_wallet_taker.get_asset_id()): CAT_REQUESTED,\n nft_to_offer_asset_id_maker: 1,\n }\n\n driver_dict = {\n nft_to_offer_asset_id_maker: match_puzzle(uncurry_puzzle(taker_nfts[0].full_puzzle)),\n bytes32.from_hexstr(cat_wallet_taker.get_asset_id()): PuzzleInfo(\n {\n \"type\": \"CAT\",\n \"tail\": \"0x\" + cat_wallet_taker.get_asset_id(),\n }\n ),\n }\n\n success, trade_make, error = await trade_manager_maker.create_offer_for_ids(\n complex_nft_offer, DEFAULT_TX_CONFIG, driver_dict=driver_dict, fee=uint64(0)\n )\n assert error is None\n assert success\n assert trade_make is not None\n\n trade_take, tx_records = await trade_manager_taker.respond_to_offer(\n Offer.from_bytes(trade_make.offer),\n 
wallet_node_taker.get_full_node_peer(),\n DEFAULT_TX_CONFIG,\n fee=uint64(0),\n )\n assert trade_take is not None\n assert tx_records is not None\n await time_out_assert(20, mempool_not_empty, True, full_node_api)\n\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_token))\n\n # Now let's make sure the final wallet state is correct\n funds_maker = int(funds_maker + XCH_REQUESTED / 2)\n funds_taker = int(funds_taker - XCH_REQUESTED / 2)\n\n await time_out_assert(30, wallet_maker.get_unconfirmed_balance, funds_maker)\n await time_out_assert(30, wallet_maker.get_confirmed_balance, funds_maker)\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n\n await time_out_assert(\n 30,\n get_cat_wallet_and_check_balance,\n taker_cat_funds_maker + CAT_REQUESTED,\n cat_wallet_taker.get_asset_id(),\n wsm_maker,\n )\n await time_out_assert(\n 30,\n get_cat_wallet_and_check_balance,\n maker_cat_funds_taker + CAT_REQUESTED,\n cat_wallet_maker.get_asset_id(),\n wsm_taker,\n )\n await time_out_assert(20, get_nft_count, 3, basic_nft_wallet_maker)\n await time_out_assert(20, get_nft_count, 0, basic_nft_wallet_taker)\n assert await basic_nft_wallet_maker.nft_store.get_nft_by_id(nft_to_offer_asset_id_maker) is not None",
"def test_multihop_receiver_on_success(vo, did_factory, root_account, caches_mock, metrics_mock):\n receiver_thread = threading.Thread(target=receiver, kwargs={'id_': 0, 'all_vos': True, 'total_threads': 1})\n receiver_thread.start()\n\n try:\n src_rse = 'XRD1'\n src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)\n jump_rse = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)\n dst_rse = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)\n\n all_rses = [src_rse_id, jump_rse_id, dst_rse_id]\n\n did = did_factory.upload_test_file(src_rse)\n rule_priority = 5\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None, priority=rule_priority)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], group_bulk=2, partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n request = __wait_for_state_transition(dst_rse_id=jump_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.DONE\n request = __wait_for_state_transition(dst_rse_id=dst_rse_id, run_poller=False, **did)\n assert request['state'] == RequestState.DONE\n\n fts_response = FTS3Transfertool(external_host=TEST_FTS_HOST).bulk_query({request['external_id']: {request['id']: request}})\n assert fts_response[request['external_id']][request['id']].job_response['priority'] == rule_priority\n\n # Two hops; both handled by receiver\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_receiver_update_request_state_total', labels={'updated': 'True'}) >= 2\n finally:\n receiver_graceful_stop.set()\n receiver_thread.join(timeout=5)\n receiver_graceful_stop.clear()",
"def create_ssl_cert_request ( ssl_hostnames ) :\n first_hostname = ssl_hostnames[ 0 ]\n csr_filename = get_ssl_csr_filename( first_hostname )\n key_filename = get_ssl_key_filename( first_hostname )\n openssl_cnf = \"\"\"\n[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = san_ext\n\n[req_distinguished_name]\ncountryName_default = US\nstateOrProvinceName_default = New York\nlocalityName_default = New York\norganizationalUnitName_default = Home Box Office, Inc\ncommonName_default = \"\"\" + first_hostname + \"\"\"\n\n[san_ext]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nsubjectAltName = @sans\n\n[sans]\n\"\"\"\n counter = 0\n for hostname in ssl_hostnames :\n counter += 1\n openssl_cnf += 'DNS.' + str( counter ) + ' = ' + hostname + '\\n'\n\n with open( first_hostname, 'w' ) as f :\n f.write( openssl_cnf )\n cmd = 'openssl req -new -newkey rsa:2048 -nodes -out ' + csr_filename + ' -keyout ' + key_filename\n cmd += ' -config ' + first_hostname + ' -subj \"/C=US/ST=New York/L=New York/O=Home Box Office Inc/CN=' + first_hostname + '\"'\n keygen = subprocess.call( cmd, shell = True )\n os.remove( first_hostname )\n if keygen != 0 :\n print \"Generation of SSL request failed!\"\n return None\n\n return { 'csr-filename' : csr_filename, 'key-filename' : key_filename }",
"def test_cadastros_de_registros_no_site_rpa_challenge():",
"def test_client_verification_retrieve(self):\n pass"
]
| [
"0.58199227",
"0.5805059",
"0.570264",
"0.56927806",
"0.568771",
"0.5604449",
"0.5598037",
"0.55067015",
"0.5504084",
"0.5498982",
"0.54972184",
"0.54926276",
"0.5492502",
"0.5478508",
"0.54717046",
"0.5460335",
"0.54358",
"0.54354906",
"0.54316694",
"0.54170007",
"0.54105514",
"0.5410287",
"0.5370613",
"0.5367694",
"0.5356601",
"0.53530264",
"0.53463304",
"0.5329675",
"0.5300386",
"0.5263136"
]
| 0.6473819 | 0 |
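The document in the record above verifies per-VO credentials by swapping the real transfertool for a subclass that records the certificate it was configured with instead of talking to FTS. Stripped of Rucio specifics, that record-and-stub pattern is sketched below; RealTool and TOOL_REGISTRY are hypothetical placeholders, not Rucio or FTS APIs.

# Skeleton of the record-and-stub pattern used in the test above.
from unittest.mock import patch

class RealTool:
    def __init__(self, cert):
        self.cert = cert
    def submit(self, transfers):
        raise NotImplementedError("talks to an external service")

TOOL_REGISTRY = {"real": RealTool}

def test_certificates_are_recorded_per_tenant():
    used_certs = []

    class SpyTool(RealTool):
        # Record the certificate instead of contacting the external service.
        def submit(self, transfers):
            used_certs.append(self.cert)
            return "fake-job-id"

    # Patch the registry for the duration of the test, mirroring the patch of
    # TRANSFERTOOL_CLASSES_BY_NAME in the record above.
    with patch.dict(TOOL_REGISTRY, {"real": SpyTool}):
        for cert in ("cert-tenant-a", "cert-tenant-b"):
            TOOL_REGISTRY["real"](cert).submit(transfers=[])

    assert sorted(used_certs) == ["cert-tenant-a", "cert-tenant-b"]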
Demonstrates lenet on MNIST dataset | def evaluate_lenet5(dataset='mnist.pkl.gz',
nkerns=[20, 50], batch_size=500):
rng = numpy.random.RandomState(23455)
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
n_valid_batches /= batch_size
n_test_batches /= batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# start-snippet-1
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
# to a 4D tensor, compatible with our ConvPoolLayer
# (28, 28) is the size of MNIST images.
layer0_input = x.reshape((batch_size, 1, 28, 28))
# Construct the first convolutional pooling layer:
# filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
# maxpooling reduces this further to (24/2, 24/2) = (12, 12)
# 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
layer0 = ConvPoolLayer(
rng,
input=layer0_input,
input_shape=(batch_size, 1, 28, 28),
filter_shape=(nkerns[0], 1, 5, 5),
poolsize=(2, 2)
)
# Construct the second convolutional pooling layer
# filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
# maxpooling reduces this further to (8/2, 8/2) = (4, 4)
# 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
layer1 = ConvPoolLayer(
rng,
input=layer0.output,
input_shape=(batch_size, nkerns[0], 12, 12),
filter_shape=(nkerns[1], nkerns[0], 5, 5),
poolsize=(2, 2)
)
# the HiddenLayer being fully-connected, it operates on 2D matrices of
# shape (batch_size, num_pixels) (i.e matrix of rasterized images).
# This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
# or (500, 50 * 4 * 4) = (500, 800) with the default values.
layer2_input = layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
layer2 = HiddenLayer(
rng,
input=layer2_input,
n_in=nkerns[1] * 4 * 4,
n_out=500,
activation=T.tanh
)
# classify the values of the fully-connected sigmoidal layer
layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
print '... loading saved model params'
saved_params = joblib.load('model/lenet5_params.pkl')
layer0.load_params(saved_params['conv1'])
layer1.load_params(saved_params['conv2'])
layer2.load_params(saved_params['fc1'])
layer3.load_params(saved_params['log1'])
# the cost we minimize during training is the NLL of the model
# cost = layer3.negative_log_likelihood(y)
# create a function to compute the mistakes that are made by the model
test_model = theano.function(
[index],
layer3.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
print '... testing'
test_losses = [
test_model(i)
for i in xrange(n_test_batches)
]
test_score = numpy.mean(test_losses)
print 'test error: ', test_score * 100, '%' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def MNIST_data():\n\n # Pobieramy macierze numpy z cyframi\n # images[i,j,k] <=> piksel (j,k) z i-tego obrazka w zbiorze danych\n images, labels = get_MNIST_dataset(range(10), \"training\") #pierwszy argument to\n\n # a) Ilosc przykladow i rozmiary danych\n print \"Raw training data dimensions \", images.shape\n print \"Labels dimensions \",labels.shape\n\n # b) Ile jest cyfr 2?\n print \"Counting 2 in training dataset \",len(filter(lambda x: x == 2, labels))\n\n # c) Jaki jest sredni obrazek 2 ? (Usrednienie wszystkich macierzy ktore sa 2)\n\n #1. Pobierzmy wszystkie dwojki, fajny sposob indeksowania\n print labels == 2\n only_2 = images[labels == 2, :, :]\n print \"Checking number of 2s \", only_2.shape\n\n #2. TODO: Usrednienie (matrix.mean moze byc przydatne)\n\n #3. TODO: narysowanie usrednionej cyfry (zobacz pl.imshow)\n\n # d) Ostatnie - przetworzmy ostatnia cyfre do 1 wymiarowego wektora\n vectorized = np.reshape(images[-1], newshape=(images[-1].shape[0]*images[-1].shape[1]))\n print \"Vectorized last digit \", vectorized",
"def MNIST_experiment():\n tsetlin_machine = TsetlinMachine(number_clauses=1000,\n number_action_states=1000,\n precision=3.0,\n threshold=10)\n\n X, y, val_X, val_y = MNIST()\n\n tsetlin_machine.fit(X, y, val_X, val_y, 300)\n print('Final training accuracy:', tsetlin_machine.accuracy(X, y))\n print('Final validation accuracy:', tsetlin_machine.accuracy(val_X, val_y))",
"def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)",
"def test_dataset():\n X,Y = get_MNIST_training_normalized()\n digits_test_truth = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 632, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, 0, 0, 0, 0, 0]\n digits_test = []\n for example in itertools.islice(X,30):\n digits_test.append(sum(example[1:100]))\n assert(example.shape == (28*28,))\n\n show_as_image(X[0,:], 28, 28)\n print digits_test\n print digits_test_truth\n assert(digits_test_truth == digits_test)\n assert(X.shape == (60000, 28*28))\n assert(Y.shape == (60000,))\n return \"Dziala :)\"",
"def mnist_training():\n mndata = MNIST(MNIST_PATH)\n train_ims, train_labels = mndata.load_training()\n train_X = np.array(train_ims).T\n train_y = np.array(train_labels).T\n return train_X, train_y",
"def load_mnist_dataset(shape=(-1,784)):\n # We first define a download function, supporting both Python 2 and 3.\n if sys.version_info[0] == 2:\n from urllib import urlretrieve\n else:\n from urllib.request import urlretrieve\n\n def download(filename, source='http://yann.lecun.com/exdb/mnist/'):\n print(\"Downloading %s\" % filename)\n urlretrieve(source + filename, filename)\n\n # We then define functions for loading MNIST images and labels.\n # For convenience, they also download the requested files if needed.\n import gzip\n\n def load_mnist_images(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n # The inputs are vectors now, we reshape them to monochrome 2D images,\n # following the shape convention: (examples, channels, rows, columns)\n data = data.reshape(shape)\n # data = data.reshape(-1, 1, 28, 28) # for lasagne\n # data = data.reshape(-1, 28, 28, 1) # for tensorflow\n # data = data.reshape(-1, 784) # for tensorflow\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n # (Actually to range [0, 255/256], for compatibility to the version\n # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n return data / np.float32(256)\n\n def load_mnist_labels(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return data\n\n # We can now download and read the training and test set images and labels.\n ## you may want to change the path\n data_dir = '' #os.getcwd() + '/lasagne_tutorial/'\n # print('data_dir > %s' % data_dir)\n\n X_train = load_mnist_images(data_dir+'train-images-idx3-ubyte.gz')\n y_train = load_mnist_labels(data_dir+'train-labels-idx1-ubyte.gz')\n X_test = load_mnist_images(data_dir+'t10k-images-idx3-ubyte.gz')\n y_test = load_mnist_labels(data_dir+'t10k-labels-idx1-ubyte.gz')\n\n # We reserve the last 10000 training examples for validation.\n X_train, X_val = X_train[:-10000], X_train[-10000:]\n y_train, y_val = y_train[:-10000], y_train[-10000:]\n\n ## you may want to plot one example\n # print('X_train[0][0] >', X_train[0][0].shape, type(X_train[0][0])) # for lasagne\n # print('X_train[0] >', X_train[0].shape, type(X_train[0])) # for tensorflow\n # # exit()\n # # [[..],[..]] (28, 28) numpy.ndarray\n # # plt.imshow 只支持 (28, 28)格式,不支持 (1, 28, 28),所以用 [0][0]\n # fig = plt.figure()\n # #plotwindow = fig.add_subplot(111)\n # # plt.imshow(X_train[0][0], cmap='gray') # for lasagne (-1, 1, 28, 28)\n # plt.imshow(X_train[0].reshape(28,28), cmap='gray') # for tensorflow (-1, 28, 28, 1)\n # plt.title('A training image')\n # plt.show()\n\n # We just return all the arrays in order, as expected in main().\n # (It doesn't matter how we do this as long as we can read them again.)\n return X_train, y_train, X_val, y_val, X_test, y_test",
"def evaluate_lenet5(learning_rate=0.1, n_epochs=200,\r\n dataset='mnist.pkl.gz',\r\n nkerns=[20, 50], batch_size=500):\r\n\r\n rng = numpy.random.RandomState(23455)\r\n\r\n datasets = load_data(dataset)\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x, test_set_y = datasets[2]\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0]\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0]\r\n n_train_batches /= batch_size\r\n n_valid_batches /= batch_size\r\n n_test_batches /= batch_size\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n x = T.matrix('x') # the data is presented as rasterized images\r\n y = T.ivector('y') # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n ishape = (28, 28) # this is the size of MNIST images\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n print '... building the model'\r\n\r\n # Reshape matrix of rasterized images of shape (batch_size,28*28)\r\n # to a 4D tensor, compatible with our LeNetConvPoolLayer\r\n layer0_input = x.reshape((batch_size, 1, 28, 28))\r\n\r\n # Construct the first convolutional pooling layer:\r\n # filtering reduces the image size to (28-5+1,28-5+1)=(24,24)\r\n # maxpooling reduces this further to (24/2,24/2) = (12,12)\r\n # 4D output tensor is thus of shape (batch_size,nkerns[0],12,12)\r\n layer0 = LeNetConvPoolLayer(rng, input=layer0_input,\r\n image_shape=(batch_size, 1, 28, 28),\r\n filter_shape=(nkerns[0], 1, 5, 5), poolsize=(2, 2))\r\n\r\n # Construct the second convolutional pooling layer\r\n # filtering reduces the image size to (12-5+1,12-5+1)=(8,8)\r\n # maxpooling reduces this further to (8/2,8/2) = (4,4)\r\n # 4D output tensor is thus of shape (nkerns[0],nkerns[1],4,4)\r\n layer1 = LeNetConvPoolLayer(rng, input=layer0.output,\r\n image_shape=(batch_size, nkerns[0], 12, 12),\r\n filter_shape=(nkerns[1], nkerns[0], 5, 5), poolsize=(2, 2))\r\n\r\n # the HiddenLayer being fully-connected, it operates on 2D matrices of\r\n # shape (batch_size,num_pixels) (i.e matrix of rasterized images).\r\n # This will generate a matrix of shape (20,32*4*4) = (20,512)\r\n layer2_input = layer1.output.flatten(2)\r\n\r\n # construct a fully-connected sigmoidal layer\r\n layer2 = HiddenLayer(rng, input=layer2_input, n_in=nkerns[1] * 4 * 4,\r\n n_out=500, activation=T.tanh)\r\n\r\n # classify the values of the fully-connected sigmoidal layer\r\n layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)\r\n\r\n # the cost we minimize during training is the NLL of the model\r\n cost = layer3.negative_log_likelihood(y)\r\n\r\n # create a function to compute the mistakes that are made by the model\r\n test_model = theano.function([index], layer3.errors(y),\r\n givens={\r\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\r\n y: test_set_y[index * batch_size: (index + 1) * batch_size]})\r\n\r\n validate_model = theano.function([index], layer3.errors(y),\r\n givens={\r\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\r\n y: valid_set_y[index * batch_size: (index + 1) * batch_size]})\r\n\r\n # create a list of all model parameters to be fit by gradient descent\r\n params = layer3.params + layer2.params + layer1.params + layer0.params\r\n\r\n # create a list of gradients for all model 
parameters\r\n grads = T.grad(cost, params)\r\n\r\n # train_model is a function that updates the model parameters by\r\n # SGD Since this model has many parameters, it would be tedious to\r\n # manually create an update rule for each model parameter. We thus\r\n # create the updates list by automatically looping over all\r\n # (params[i],grads[i]) pairs.\r\n updates = []\r\n for param_i, grad_i in zip(params, grads):\r\n updates.append((param_i, param_i - learning_rate * grad_i))\r\n\r\n train_model = theano.function([index], cost, updates=updates,\r\n givens={\r\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\r\n y: train_set_y[index * batch_size: (index + 1) * batch_size]})\r\n\r\n ###############\r\n # TRAIN MODEL #\r\n ###############\r\n print '... training'\r\n # early-stopping parameters\r\n patience = 10000 # look as this many examples regardless\r\n patience_increase = 2 # wait this much longer when a new best is\r\n # found\r\n improvement_threshold = 0.995 # a relative improvement of this much is\r\n # considered significant\r\n validation_frequency = min(n_train_batches, patience / 2)\r\n # go through this many\r\n # minibatche before checking the network\r\n # on the validation set; in this case we\r\n # check every epoch\r\n\r\n best_params = None\r\n best_validation_loss = numpy.inf\r\n best_iter = 0\r\n test_score = 0.\r\n start_time = time.clock()\r\n\r\n epoch = 0\r\n done_looping = False\r\n\r\n while (epoch < n_epochs) and (not done_looping):\r\n epoch = epoch + 1\r\n for minibatch_index in xrange(n_train_batches):\r\n\r\n iter = (epoch - 1) * n_train_batches + minibatch_index\r\n\r\n if iter % 100 == 0:\r\n print 'training @ iter = ', iter\r\n cost_ij = train_model(minibatch_index)\r\n\r\n if (iter + 1) % validation_frequency == 0:\r\n\r\n # compute zero-one loss on validation set\r\n validation_losses = [validate_model(i) for i\r\n in xrange(n_valid_batches)]\r\n this_validation_loss = numpy.mean(validation_losses)\r\n print('epoch %i, minibatch %i/%i, validation error %f %%' % \\\r\n (epoch, minibatch_index + 1, n_train_batches, \\\r\n this_validation_loss * 100.))\r\n\r\n # if we got the best validation score until now\r\n if this_validation_loss < best_validation_loss:\r\n\r\n #improve patience if loss improvement is good enough\r\n if this_validation_loss < best_validation_loss * \\\r\n improvement_threshold:\r\n patience = max(patience, iter * patience_increase)\r\n\r\n # save best validation score and iteration number\r\n best_validation_loss = this_validation_loss\r\n best_iter = iter\r\n\r\n # test it on the test set\r\n test_losses = [test_model(i) for i in xrange(n_test_batches)]\r\n test_score = numpy.mean(test_losses)\r\n print((' epoch %i, minibatch %i/%i, test error of best '\r\n 'model %f %%') %\r\n (epoch, minibatch_index + 1, n_train_batches,\r\n test_score * 100.))\r\n\r\n if patience <= iter:\r\n done_looping = True\r\n break\r\n\r\n end_time = time.clock()\r\n print('Optimization complete.')\r\n print('Best validation score of %f %% obtained at iteration %i,'\\\r\n 'with test performance %f %%' %\r\n (best_validation_loss * 100., best_iter + 1, test_score * 100.))\r\n print >> sys.stderr, ('The code for file ' +\r\n os.path.split(__file__)[1] +\r\n ' ran for %.2fm' % ((end_time - start_time) / 60.))",
"def main():\n\n dataset = ConvMNIST(64)\n print(dataset.get_train().x.shape)\n\n\n inputs = Value(type=tf.float32, shape=(None, 28, 28, 1), cls = None)\n targets = Value(type=tf.int64, shape=(None), cls = 10)\n learning_rate = 0.0001\n\n fc_hidden = [1024, 500]\n c_h = [\n (3, 3, 1, 32),\n (3, 3, 32, 64)\n ]\n conv_hidden = ConvHidden(conv_weights=c_h, fc_weights=fc_hidden)\n\n config = Config(inputs, targets, conv_hidden, learning_rate)\n\n network = ConvNetworkBuilder(config)\n hidden = FFConvHiddenBuilder()\n _ = network.build_network(hidden)\n\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n\n trainer = Trainer(network, train_config)\n trainer.train(dataset)",
"def __init__(self):\n\n TEST_RATIO = 0.05\n mnist_trainset = datasets.MNIST(root='./data', train=True, download=True, transform=None)\n idxs = np.arange(mnist_trainset.train_data.size(0))\n np.random.shuffle(idxs)\n\n #print(torch.min(mnist_trainset.train_labels), torch.max(mnist_trainset.train_labels))\n #print(mnist_trainset.train_labels.size())\n \n # reshape input data to (1, 784) and normalize to range [0., 1.]\n self.train_data = torch.reshape(\n mnist_trainset.train_data[idxs].float(), (-1,1,28,28))/255.\n self.data_size = self.train_data.size(0)\n self.train_len = self.train_data.size(0)\n self.train_label = torch.Tensor([1]).float() # since there is only one class - 'real' image\n\n print('Train images -- {}'.format(self.train_data.size()))",
"def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)",
"def get_mnist(one_hot_enc, normalized, flatten):",
"def classify_lenet5(batch_size=500, output_size=20):\n\n rng = numpy.random.RandomState(23455)\n\n\n # start-snippet-1\n x = T.matrix('x') # the data is presented as rasterized images\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print '... building the model'\n\n # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)\n # to a 4D tensor, compatible with our LeNetConvPoolLayer\n # (28, 28) is the size of MNIST images.\n layer0_input = x.reshape((batch_size, 1, 37, 23))\n\n # Construct the first convolutional pooling layer:\n # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)\n # maxpooling reduces this further to (24/2, 24/2) = (12, 12)\n # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)\n layer0 = LeNetConvPoolLayer(\n rng,\n input=layer0_input,\n image_shape=(batch_size, 1, 37, 23),\n filter_shape=(20, 1, 4, 2),\n poolsize=(2, 2),\n )\n\n # layer1 = LeNetConvPoolLayer(\n # rng,\n # input=layer0.output,\n # image_shape=(batch_size, 20, 17, 11),\n # filter_shape=(50, 20, 4, 2),\n # poolsize=(2, 2),\n # )\n #\n # layer4 = LeNetConvPoolLayer(\n # rng,\n # input=layer1.output,\n # image_shape=(batch_size, 50, 7, 5),\n # filter_shape=(100, 50, 4, 2),\n # poolsize=(2, 2),\n # )\n\n layer2_input = layer0.output.flatten(2)\n\n # construct a fully-connected sigmoidal layer\n layer2 = HiddenLayer(\n rng,\n input=layer2_input,\n n_in=3740,\n n_out=output_size,\n activation=T.tanh,\n use_bias=True\n )\n\n # layer5 = HiddenLayer(\n # rng,\n # input=layer2.output,\n # n_in=200,\n # n_out=output_size,\n # activation=T.tanh,\n # use_bias=True\n # )\n\n # classify the values of the fully-connected sigmoidal layer\n layer3 = LogisticRegression(input=layer2.output, n_in=output_size, n_out=2)\n\n model_params = pickle.load(open('../model/cnn_dist_'+str(output_size)+'.pkl'))\n #\n layer0.W = theano.shared(\n value=numpy.array(\n model_params[2].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer0.b = theano.shared(\n value=numpy.array(\n model_params[3].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # layer1.W = theano.shared(\n # value=numpy.array(\n # model_params[-4].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer1.b = theano.shared(\n # value=numpy.array(\n # model_params[-3].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n #\n # layer4.W = theano.shared(\n # value=numpy.array(\n # model_params[-6].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer4.b = theano.shared(\n # value=numpy.array(\n # model_params[-5].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n\n layer2.W = theano.shared(\n value=numpy.array(\n model_params[0].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer2.b = theano.shared(\n value=numpy.array(\n model_params[1].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # layer5.W = theano.shared(\n # value=numpy.array(\n # model_params[-10].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer5.b = theano.shared(\n # value=numpy.array(\n # model_params[-9].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n\n layer3.W = theano.shared(\n value=numpy.array(\n 
model_params[4].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer3.b = theano.shared(\n value=numpy.array(\n model_params[5].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # params = layer3.params + layer5.params + layer2.params + layer4.params + layer1.params + layer0.params\n\n datasets = load_data(None)\n\n sets = ['train', 'dev', 'test']\n dimension = [20000, 20000, 20000]\n for k in range(3):\n if k == 0:\n classify_set_x, classify_set_y, classify_set_z, classify_set_m, classify_set_c, classify_set_b= datasets[k]\n else:\n classify_set_x, classify_set_y, classify_set_z= datasets[k]\n\n # compute number of minibatches for training, validation and testing\n n_classify_batches = classify_set_x.get_value(borrow=True).shape[0]\n n_classify_batches /= batch_size\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n classify = theano.function(\n [index],\n layer2.output,\n givens={\n x: classify_set_x[index * batch_size: (index + 1) * batch_size],\n }\n )\n\n r = []\n\n for i in xrange(n_classify_batches):\n m = classify(i)\n r.extend(m)\n r = np.array(r)\n print r.shape\n r = np.append(r, np.reshape(classify_set_y.eval(),(dimension[k], 1)), 1)\n numpy.savetxt('../extractedInformation/cnn_dist_'+str(output_size)+'/'+sets[k]+'.csv', r, delimiter=\",\")",
"def train_mnist():\r\n # type: () -> None\r\n\r\n # Build dataset and model\r\n dataset = MNIST_TRAIN(path=Config.video_folder)\r\n model = LSAMNIST(input_shape=dataset.shape, code_length=64,\r\n cpd_channels=100).to(device).train()\r\n\r\n # Set up result helper and perform test\r\n helper = OneClassResultHelper(dataset, model,\r\n checkpoints_dir=Config.model_ckpt,\r\n output_file='mnist.txt')\r\n helper.train_one_class_classification()",
"def readMNISTData():\n mnist = input_data.read_data_sets(\"MNIST_data\",one_hot=True) \n return mnist",
"def test_keras_mnist_return_X_y():\n X, y = fetch(\"mnist\", return_X_y=True)\n assert X.shape == (70000, 28 * 28)\n assert y.shape == (70000,)",
"def get_mnist():\n from keras.datasets import mnist\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n num_classes = 10\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n print (\"Using Channels first\")\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n print(\"Channels last\")\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n return (x_train, y_train), (x_test, y_test)",
"def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = lbl[ind[i]]\n\n return images, labels",
"def trainNet():",
"def run_mnist_test():\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n train_x, train_y = mnist.train.images, mnist.train.labels,\n test_x, test_y = mnist.test.images, mnist.test.labels\n # Reshape right off the bat to save some time.\n train_x = train_x.reshape(-1, 28, 28, 1)\n test_x = test_x.reshape(-1, 28, 28, 1)\n\n conv1 = LeNetClassifier.ConvLayer(kernel_width=5, kernel_height=5,\n feature_maps=1)\n conv2 = LeNetClassifier.ConvLayer(kernel_width=3, kernel_height=3,\n feature_maps=32)\n conv3 = LeNetClassifier.ConvLayer(kernel_width=3, kernel_height=3,\n feature_maps=64)\n network = LeNetClassifier((28, 28, 1), [conv1, conv2, conv3],\n [4 * 4 * 128, 625], 10, batch_size=128)\n\n saver = tf.train.Saver()\n\n sess = tf.Session()\n init = tf.initialize_all_variables()\n sess.run(init)\n\n writer = tf.train.SummaryWriter(\"mnist_logs\", sess.graph_def)\n\n print(\"Tensorflow: Starting MNIST test...\")\n\n accuracy = 0\n start_time = time.time()\n iterations = 0\n while iterations < 2000:\n if iterations % 500 == 0:\n test_batch = mnist.test.next_batch(128)\n result = sess.run(network.predict(),\n feed_dict={network.inputs(): test_batch[0],\n network.expected_outputs(): test_batch[1]})\n argmax = np.argmax(test_batch[1], axis=1)\n accuracy = np.mean(argmax == result)\n print(\"Tensorflow: step %d, testing accuracy %s\" % \\\n (iterations, accuracy))\n\n batch = mnist.train.next_batch(128)\n sess.run(network.train(), feed_dict={network.inputs(): batch[0],\n network.expected_outputs(): batch[1]})\n iterations += 1\n\n # Save the network at the end.\n #saver.save(sess, \"Variables/test.ckpt\")\n\n elapsed = time.time() - start_time\n speed = iterations / elapsed\n print(\"Tensorflow: Ran %d training iterations. (%f iter/s)\" % \\\n (iterations, speed))\n print(\"Tensorflow: MNIST test completed in %f seconds.\" % (elapsed))\n return (elapsed, speed)",
"def test_train():\n set_seed(42) # Noqa\n transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n mnist_train = MNIST(\"./\", download=True, train=False, transform=transform)\n model = SimpleNet()\n\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)\n criterion = nn.CrossEntropyLoss()\n\n train_loader = DataLoader(mnist_train, batch_size=64, shuffle=True,\n num_workers=0)\n loss, accuracy = train(model, optimizer, criterion, train_loader,\n imshape=(-1, 28*28))\n\n assert type(loss) == torch.Tensor\n assert type(accuracy) == np.float64\n assert len(loss.shape) == 0",
"def load_mnist(dataset_name='mnist', **kwargs):\n dataset_name = dataset_name.strip().lower().replace('minist', 'mnist')\n\n if dataset_name.lower() not in ['mnist', 'fashion-mnist']:\n raise ValueError('Only mnist or fashion-mnist are valid dataset_name.')\n\n base = 'http://yann.lecun.com/exdb/mnist/'\n if dataset_name == 'fashion-mnist':\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n\n dirname = os.path.join(_trident_dir, dataset_name)\n make_dir_if_need(dirname)\n\n \"\"\"Load MNIST data from `path`\"\"\"\n trainData = None\n testData = None\n for kind in ['train', 'test']:\n labels_file = '{0}-labels-idx1-ubyte.gz'.format( 't10k' if dataset_name in ('mnist', 'fashion-mnist') and kind == 'test' else kind)\n images_file = '{0}-images-idx3-ubyte.gz'.format( 't10k' if dataset_name in ('mnist', 'fashion-mnist') and kind == 'test' else kind)\n # if dataset_name == 'emnist' :\n # labels_file='emnist-balanced-'+labels_file\n # images_file = 'emnist-balanced-' + images_file\n\n is_data_download = download_file(base + labels_file, dirname, labels_file, dataset_name + '_labels_{0}'.format(kind))\n is_label_download = download_file(base + images_file, dirname, images_file, dataset_name + '_images_{0}'.format(kind))\n if is_data_download and is_label_download:\n labels_path = os.path.join(dirname, labels_file)\n images_path = os.path.join(dirname, images_file)\n labeldata = None\n imagedata = None\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)\n labels = np.squeeze(labels).astype(np.int64)\n labeldata = LabelDataset(labels.tolist(),object_type=ObjectType.classification_label)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16)\n images = np.reshape(images, (len(labels), 784)).astype(dtype=_session.floatx)\n images = np.reshape(images, (-1, 28, 28))\n imagedata = ImageDataset(images, object_type=ObjectType.gray)\n if kind == 'train':\n trainData = Iterator(data=imagedata, label=labeldata)\n else:\n testData = Iterator(data=imagedata, label=labeldata)\n\n dataset = DataProvider(dataset_name, traindata=trainData, testdata=testData)\n dataset.binding_class_names(\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] if dataset_name == 'mnist' else ['T-shirt/top', 'Trouser', 'Pullover',\n 'Dress', 'Coat', 'Sandal', 'Shirt',\n 'Sneaker', 'Bag', 'Ankle boot'],\n 'en-US')\n\n return dataset\n return None",
"def load_data():\n # Load image data from MNIST.\n (train_x, train_y),(eval_x, eval_y) = keras.datasets.mnist.load_data()\n\n # We convert the input data to (60000, 28, 28, 1), float32 and normalize our data values to the range [0, 1].\n train_x = train_x.reshape(train_x.shape[0], train_x.shape[1], train_x.shape[2], 1)\n eval_x = eval_x.reshape(eval_x.shape[0], eval_x.shape[1], eval_x.shape[2], 1)\n\n train_x = train_x.astype('float32')\n eval_x = eval_x.astype('float32')\n train_x /= 255\n eval_x /= 255\n\n # Preprocess class labels \n train_y = train_y.astype(np.int32)\n eval_y = eval_y.astype(np.int32)\n\n train_y = np_utils.to_categorical(train_y, 10)\n eval_y = np_utils.to_categorical(eval_y, 10)\n\n return train_x, train_y, eval_x, eval_y",
"def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()",
"def get_mnist_cnn():\n # Set defaults.\n nb_classes = 10 #dataset dependent \n batch_size = 128\n epochs = 4\n \n # Input image dimensions\n img_rows, img_cols = 28, 28\n\n # Get the data.\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n \n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n #x_train = x_train.reshape(60000, 784)\n #x_test = x_test.reshape(10000, 784)\n \n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n #print('x_train shape:', x_train.shape)\n #print(x_train.shape[0], 'train samples')\n #print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = to_categorical(y_train, nb_classes)\n y_test = to_categorical(y_test, nb_classes)\n\n # convert class vectors to binary class matrices\n #y_train = keras.utils.to_categorical(y_train, nb_classes)\n #y_test = keras.utils.to_categorical(y_test, nb_classes)\n\n return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs)",
"def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)",
"def get_mnist_data(batch=128):\n \n def transformer(data, label):\n data = data.flatten().expand_dims(0).astype(np.float32)/255\n data = data-0.13/0.31\n label = label.astype(np.float32)\n return data, label\n\n train_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=True, transform=transformer)\n validation_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=False, transform=transformer)\n train_dataloader = gluon.data.DataLoader(train_dataset, batch_size=batch, last_batch='keep',shuffle=True)\n validation_dataloader = gluon.data.DataLoader(validation_dataset, batch_size=batch, last_batch='keep')\n \n return train_dataloader, validation_dataloader",
"def num_training_examples(self):",
"def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y",
"def test_len_trainset(self):\n self.assertEqual(self.__dataset.get_train_len, 10000)",
"def mnist(path=None):\r\n url = 'http://yann.lecun.com/exdb/mnist/'\r\n files = ['train-images-idx3-ubyte.gz',\r\n 'train-labels-idx1-ubyte.gz',\r\n 't10k-images-idx3-ubyte.gz',\r\n 't10k-labels-idx1-ubyte.gz']\r\n\r\n if path is None:\r\n # Set path to /home/USER/data/mnist or C:\\Users\\USER\\data\\mnist\r\n path = os.path.join(os.path.expanduser('~'), 'data', 'mnist')\r\n\r\n # Create path if it doesn't exist\r\n os.makedirs(path, exist_ok=True)\r\n\r\n # Download any missing files\r\n for file in files:\r\n if file not in os.listdir(path):\r\n urlretrieve(url + file, os.path.join(path, file))\r\n print(\"Downloaded %s to %s\" % (file, path))\r\n\r\n def _images(path):\r\n \"\"\"Return images loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255\r\n\r\n def _labels(path):\r\n \"\"\"Return labels loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 8 bytes are magic_number, n_labels\r\n integer_labels = np.frombuffer(f.read(), 'B', offset=8)\r\n\r\n def _onehot(integer_labels):\r\n \"\"\"Return matrix whose rows are onehot encodings of integers.\"\"\"\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot\r\n\r\n return _onehot(integer_labels)\r\n\r\n train_images = _images(os.path.join(path, files[0]))\r\n train_labels = _labels(os.path.join(path, files[1]))\r\n test_images = _images(os.path.join(path, files[2]))\r\n test_labels = _labels(os.path.join(path, files[3]))\r\n\r\n return train_images, train_labels, test_images, test_labels"
]
| [
"0.7114263",
"0.6918745",
"0.6859507",
"0.6718894",
"0.67088884",
"0.66813",
"0.66771555",
"0.66229486",
"0.6597695",
"0.6492547",
"0.64591664",
"0.6383095",
"0.636698",
"0.6356136",
"0.63402",
"0.6279806",
"0.6273015",
"0.62261784",
"0.6214414",
"0.6202346",
"0.6199214",
"0.61644787",
"0.6144584",
"0.61403054",
"0.61355335",
"0.613392",
"0.6127851",
"0.6124691",
"0.60904986",
"0.6079255"
]
| 0.7104353 | 1 |
Assuming that e is an NxN expression, return the lower triangular part as a vector. | def vec(e):
N = e.getShape().dim(0)
rows = [i for i in range(N) for j in range(i,N)]
cols = [j for i in range(N) for j in range(i,N)]
vals = [ 2.0**0.5 if i!=j else 1.0 for i in range(N) for j in range(i,N)]
return Expr.flatten(Expr.mulElm(e, Matrix.sparse(N,N,rows,cols,vals))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def basis_vector_matrix(*e):\n return np.array(e).T",
"def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):\n x = tf.nn.l2_normalize(x)\n for _ in range(num_steps):\n x = eig_one_step(x, learning_rate, vector_prod_fn)\n return x",
"def edge_vector(edge):\n v1, v2 = edge.verts\n return (v2.co - v1.co).normalized()",
"def lowest_rank_approx(A,e):\n \n \n U,s,Vh=la.svd(A,full_matrices=False)\n t=s.copy()\n t[t>e]=0\n i=t.nonzero()[0][0]\n \n return U[:,:i].dot(np.diag(s[:i])).dot(Vh[:i,:])",
"def lin_t_func(self):\n mat = self.precalc_mat[0]\n dim_x = self.sys[2].shape[0]\n\n return mat[1, 0, 1][dim_x:]",
"def inverse_e(self, e):\n return (e - self.e_min) / self.e_range",
"def vFrmE(E):\n Ej=E*1.6021*10**-22\n m=1.674929*10**-27\n v=np.sqrt((2.*Ej)/m)\n return(v)",
"def vector(self):\n \n v_list = Householder.triangle_operation(self)[1]\n \n return(v_list)",
"def vsq_from_E(E):\n Ej=E*1.6021*10**-22\n m=1.674929*10**-27\n return (2.*Ej)/m",
"def gen_matrix(e):\n\tif e < 1:\n\t\treturn None\n\tm_list = [[[1, 2], [3, 0]]]\n\t_b = m_list[0]\n\tfor n in xrange(1, e):\n\t\tm = m_list[n - 1]\n\t\tm_list.append(\n\t\t\t[\n\t\t\t\t[4 * i + _b[0][0] for i in m[0]] + [4 * i + _b[0][1] for i in m[0]],\n\t\t\t\t[4 * i + _b[0][0] for i in m[1]] + [4 * i + _b[0][1] for i in m[1]],\n\t\t\t\t[4 * i + _b[1][0] for i in m[0]] + [4 * i + _b[1][1] for i in m[0]],\n\t\t\t\t[4 * i + _b[1][0] for i in m[1]] + [4 * i + _b[1][1] for i in m[1]],\n\t\t\t]\n\t\t)\n\treturn m_list",
"def incoming_edge_vector(node, node_xyz, edge, normalize=False):\n u, v = edge\n other = u if u != node else v\n\n return vector_two_nodes(node_xyz[other], node_xyz[node], normalize)",
"def inv(a):\n a, cv, isMatrix = get_computation_matrix(a)\n t_dtype = TypeUtil.to_numpy_dtype(a.get_dtype())\n if a.numRows() != a.numCols():\n raise ValueError(\"inv: input a is not a square matrix!\")\n #compute LU using getrf\n (lu, piv, _) = getrf(a, overwrite_a=1, dtype=t_dtype)\n (ainv, _) = getri(lu, piv, lwork=0, overwrite_lu=1, dtype=t_dtype)\n if cv:\n if isMatrix:\n return ainv.to_numpy_matrix()\n else:\n return ainv.to_numpy_array()\n else:\n return ainv",
"def call_single_vec(self, input_value):\n _, eigVectors = self.getEigen(input_value)\n return eigVectors[:,:,-1]",
"def triax2squeeze(t,e):\n a = 1.\n c = e\n b = np.sqrt(c**2 - t*(c**2-a**2))\n return b",
"def extract_ltri( m, context = FloatContext ):\n zero = context.zero\n n,n_ = shape_mat(m)\n return [[ m[i][j] if i >= j else zero \n for j in xrange(n_)] \n for i in xrange(n)]",
"def extract_ltri( m, context = FloatContext ):\n rows, cols = shape_mat(m)\n return [ row[:i+1] + [context.zero]*(cols - i - 1) \n for i, row in enumerate(m) ]",
"def svec(A):\n \n n = A.shape[0]\n B = A.copy()\n B[np.triu_indices(n, 1)] = B[np.triu_indices(n, 1)] * np.sqrt(2)\n return B[np.triu_indices(n)]",
"def V(E, g, gl):\n num = 0\n den = 0\n for i in range(len(E)):\n num += E[i][0]*g[i][0] + E[i][1]*g[i][1]\n den += g[i][0] + g[i][1] + gl\n return num / den",
"def lowest_rank_approx(A,e):",
"def _flatten_lower_triangle(matrix):\r\n matrix = asarray(matrix)\r\n flattened = []\r\n for col_num in range(matrix.shape[1]):\r\n for row_num in range(matrix.shape[0]):\r\n if col_num < row_num:\r\n flattened.append(matrix[row_num][col_num])\r\n return flattened",
"def pde_eigv(self, u):\n u0, u1, u2 = u.T\n c = np.sqrt(9.81*u0)\n vel = np.sqrt((u1/u0)**2 + (u2/u0)**2)\n return np.array([vel-c, vel, vel+c])",
"def matrixOfEigenvec(A):\n eigenvals, eigenvecs = la.eig(A)\n return eigenvecs",
"def get_linearEvolving(self):\n return self.get_linearEvolvingEigen()",
"def invtr(A, overwrite=False, lower=False): \n trtri, = la.lapack.get_lapack_funcs(('trtri',), (A,))\n \n inv_A, info = trtri(A, lower=lower, overwrite_c=overwrite)\n \n if info > 0:\n raise sp.LinAlgError(\"%d-th diagonal element of the matrix is zero\" % info)\n if info < 0:\n raise ValueError('illegal value in %d-th argument of internal potri'\n % -info) \n \n return inv_A",
"def triangle_operation(self):\n \n shape = np.shape(self)\n m,n = shape\n \n R = np.copy(self)\n \n #Dependent on the shape of the matrix you have to do the transformation on a \n #different number r of columns\n if m > n:\n r = n\n elif n > m:\n r = m\n else:\n r = n - 1\n \n # Create identity matrix I of same size as A\n I = np.zeros(m*r).reshape(m,r)\n \n I[:r] = np.identity(r)\n\n # Create list_v \n list_v = []\n\n # write out vectors a and e of decreasing size from the columns of R and I \n \n for j in list(range(r)): \n a = [row[j] for row in R[j:]] # j'th column of A but only the m-i last rows.\n e = [row[j] for row in I[j:]] # same for the identity matrix\n \n a = np.array(a)\n e = np.array(e)\n sigma = np.linalg.norm(a) # this is the norm of the vector/column of A \n v = a.reshape(m-j,1) + (np.dot(sigma, e.reshape(m-j,1))) # v = x + sigma * e\n list_v.append(v)\n\n H = Reflection(list_v[j]) # calculate the Housholder transformation for the vector v\n R = H * R # apply the transformation to the matrix A and obtain R stepwise\n\n \n return(R, list_v)",
"def eigen_vector_i(self, i):\n return self._eig_vec[:,i]",
"def ev(knotvec, coeffs, u):\n assert len(coeffs) == knotvec.numdofs, 'Wrong size of coefficient vector'\n return scipy.interpolate.splev(u, (knotvec.kv, coeffs, knotvec.p))",
"def eigenv2tensor(axis):\n vec = np.ones((3, 3))\n vecval = np.ones((3, 3))\n for i in xrange(len(axis)):\n vmag = np.linalg.norm(axis[i])\n v = axis[i] / vmag\n #print v\n vec[:, i] = v\n vecval[:, i] = axis[i]\n adp = np.linalg.solve(vec, vecval)\n return adp",
"def get_inverse(a):\n if len(a) == len(a[0]):\n i = get_identity(len(a))\n inverse = gaussian_solve(a, i)\n return inverse",
"def _rowvec(x):\n return _colvec(x).transpose()"
]
| [
"0.6288495",
"0.6025632",
"0.59803706",
"0.59170324",
"0.5578252",
"0.554264",
"0.55426145",
"0.545888",
"0.5450306",
"0.54380167",
"0.5430265",
"0.5418534",
"0.5400798",
"0.5389048",
"0.5374389",
"0.53610647",
"0.535368",
"0.53154343",
"0.5296928",
"0.52742046",
"0.52679",
"0.5253483",
"0.5252303",
"0.52433884",
"0.5216274",
"0.52042645",
"0.518737",
"0.5181248",
"0.5176322",
"0.5175404"
]
| 0.6366133 | 0 |
Loading sequence of W,T from file. Either x is a file path or sequence | def _load_wt_file(self, x):
if type(x) == str:
# Load file
x = np.loadtxt(x)
W = x[:self.dimY*self.dimX].reshape(self.dimY,self.dimX)
W = W.transpose() # convention
T = x[self.dimY*self.dimX:].reshape(self.dimY,self.dimY) # no need to transpose
return W, T | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load(filename):\r\n wholeTract= nib.streamlines.load(filename) \r\n wholeTract = wholeTract.streamlines\r\n return wholeTract",
"def load_sequence_of_play(filename):\r\n import xml.etree.ElementTree as ET\r\n return SequenceOfPlay(ET.parse(filename).getroot())",
"def LoadBatch(filename):",
"def load_dataset(filepath):\n \n X = list()\n x = list()\n\n Y = list()\n y = list()\n \n for line in open(filepath):\n # blank lines separate sequences\n if len(line) <= 1:\n X.append(x)\n Y.append(y)\n\n x = list()\n y = list()\n else:\n a, b = line.strip().split('\\t')\n x.append(a)\n y.append(b)\n \n return X, Y",
"def _load_wt(self, x):\n W = x[:self.dimY*self.dimX].reshape(self.dimY,self.dimX) \n W = W.transpose() # Coz W is stored in such way\n # print(W)\n T = x[self.dimY*self.dimX:].reshape(self.dimY,self.dimY).transpose() \n return W, T",
"def loadfile():\n try:\n x = tkinter.filedialog.askopenfilename()\n except TypeError:\n return\n if not x:\n return\n y = x.split(\".\")\n if y[-1] == \"fits\":\n # TODO: this is extremely stupid and dummy. Create new function for converting\n # add proper formating etc\n hdulist = fits.open(x)\n tbdata = hdulist[1].data\n a = tbdata.field('TMID')/86400.0 + 2453005.5\n b = 15 - 2.5*numpy.log10(tbdata.field('TAMFLUX2'))\n out = \"\"\n for i in range(len(a)):\n out += str(a[i]) + \" \" * 5 + str(b[i]) + \"\\n\"\n return (x, out)\n else:\n file = open(x)\n y = file.read()\n file.close()\n s = (x, y)\n return s",
"def load_input(filename: str) -> list:\n\n text_stream = io.open(filename, 'r', encoding='utf-8', errors='ignore', newline='\\n')\n \"\"\" Calls Python's io function to read the file with the specified name.\"\"\"\n\n initial_state = []\n for i in range(0, 4):\n initial_state.append(list(map(int, text_stream.readline().rstrip().split(' '))))\n \"\"\" The rstrip method removes all trailing whitespace of the string. The split \n method uses the given character as the delimiter to break down the string and \n return a list of the substrings. The map function takes that list, converts \n the substrings into integers and returns a map object, which is eventually \n converted into a list by the exterior call to the list function. \"\"\"\n\n \"\"\" A state is represented as a multi-layer list. The first layer contains \n the four rows, each of which is a second layer that consists of four tiles. \"\"\"\n\n blank_line = text_stream.readline()\n \"\"\" In the input file, there is a blank line in between the two states.\"\"\"\n\n goal_state = []\n for i in range(0, 4):\n goal_state.append(list(map(int, text_stream.readline().rstrip().split(' '))))\n \"\"\" The construct of this part is identical to the one above. \"\"\"\n\n text_stream.close()\n\n ret = [initial_state, goal_state]\n \"\"\" Returns the two lists that represent the initial and goal states, \n respectively. \"\"\"\n return ret",
"def load_training(file_name, target_val, training_data, training_targets, \n elements):\n\n file = open(file_name, \"r\")\n\n # Iterate over file until empty line recieved\n while True:\n chunk = file.readline()\n\n if(chunk == ''):\n break\n\n ret = load_chunk(chunk, elements)\n\n training_targets.append(target_val)\n\n # Convert data to frequency domain using fft()\n training_data.append([i.real for i in fft(ret)])",
"def load_one_file_for_keras(file_path, alphabet_pickle):\n\n file_feat_list = open(file_path).read().split()\n\n pkl = open(alphabet_pickle, 'rb')\n token2int = pickle.load(pkl)\n\n example = []\n for token in set(file_feat_list):\n if token in token2int:\n example.append(token2int[token])\n else:\n example.append(token2int['oov_word'])\n\n return example",
"def load_trajs(trajects, topology, PELE_order=True):\n trajectories = [x for x in glob.glob(trajects)]\n topologies = [x for x in glob.glob(topology)]\n if len(topologies) > 1:\n topology_extension = len(topologies[0].split(\".\")[-1])+1\n topologies = sorted(topologies, key=lambda x: int(x.split(\"_\")[-1][:-topology_extension]))\n if PELE_order:\n topologies = topologies[1:] + topologies[:1]\n extension_len = len(trajectories[0].split(\".\")[-1])+1\n trajectories = sorted(trajectories, key=lambda x: int(x.split(\"_\")[-1][:-extension_len]))\n for file_pair in zip(trajectories, itertools.cycle(topologies)):\n yield file_pair",
"def load(self):\n if self.__fname == '':\n print('You must pass in a file name to load!')\n return []\n\n ext = os.path.splitext(self.__fname)[1]\n first_pt = None\n if len(self.__fea.points) > 0:\n first_pt = self.__fea.points[0]\n if ext == '.dxf':\n parts = self.__load_dxf()\n elif ext in ['.brep', '.brp', '.iges', '.igs', '.step', '.stp']:\n self.__make_geo()\n parts = self.__load_geo()\n last_pt = None\n if first_pt != None:\n if len(self.__fea.points) > 2:\n last_pt = self.__fea.points[-1]\n if self.__scale != '':\n # call scale\n pass\n return parts",
"def get_sequence_from_file(filename: str) -> List[Sample]:\n samples = []\n\n with open(get_path() + \"/sequence/\" + filename, \"rb\") as file:\n while True:\n try:\n samples.append(pickle.load(file))\n except EOFError:\n break\n return samples",
"def load_human_sequences():\n # Define sequences list variable to store the sequences\n sequences = []\n # Open the human 9mer sequences file\n f = open(\"Human_9mer_Sequences.txt\", \"r\")\n # Store each sequence to the list\n for line in f:\n sequences.append(line.strip())\n\n return sequences",
"def load(self, input):",
"def load(self, filename):\n # XXX Hay que comprobar los datos leidos y lanzar excepcion\n f = open(filename)\n prelaciones = []\n asig = []\n rec = []\n l = f.readline()\n while l:\n # Activities and following activities\n if l[0:21] == 'PRECEDENCE RELATIONS:':\n f.readline()\n l = f.readline()\n while l[0] != '*':\n data = l.split()\n prel = (data[0], data[3:])\n prelaciones.append(prel)\n l = f.readline()\n\n # Activity duration and resource units needed\n if l[0] == '-':\n l = f.readline()\n while l[0] != '*':\n asig.append(l.split())\n l = f.readline()\n\n # Name, type and unit of resources\n if l[0:22] == 'RESOURCEAVAILABILITIES':\n l = f.readline()\n while l[0] != '*':\n rec.append(l.split())\n l = f.readline()\n\n l = f.readline()\n \n # Create data structure\n cont = 1\n activities = []\n for prelacion in prelaciones:\n activities.append([cont, prelacion[0], prelacion[1], '', '', '', '', '', ('Beta')])\n cont += 1 \n\n # Update activities duration\n for n in range(len(asig)): \n activities[n][6] = float(asig[n][2])\n\n # Update resources\n i = 1\n m = 0\n resources = []\n if len(rec) < 2:\n raise InvalidFileFormatException()\n\n for n in range(len(rec[1])):\n # Renewable\n if rec[0][m]=='R' or rec[0][m][0]=='R':\n if rec[0][m]=='R':\n row=[rec[0][m]+rec[0][i], 'Renewable', '', rec[1][n]] \n m+=2\n else:\n row=[rec[0][m], 'Renewable', '', rec[1][n]] \n m+=1 \n # Non Renewable\n elif rec[0][m]=='N' or rec[0][m][0]=='N':\n if rec[0][m]=='N':\n row=[rec[0][m]+rec[0][i], 'Non renewable', rec[1][n], '']\n m+=2\n else:\n row=[rec[0][m], 'Non renewable', rec[1][n], ''] \n m+=1\n # Double constrained\n elif rec[0][m]=='D' or rec[0][m][0]=='D':\n if rec[0][m]=='D':\n row=[rec[0][m]+rec[0][i], 'Double constrained', rec[1][n], rec[1][n]]\n m+=2\n else:\n row=[rec[0][m], 'Double constrained', rec[1][n], rec[1][n]] \n m+=1\n \n resources.append(row)\n i += 2\n # Note: Unlimited resources are not present on PSPLIB projects and so \n # not taken into account here\n\n # Resources needed per activity\n asignation = []\n for n in range(len(asig)): \n for m in range(3, 3+len(rec[1])): #len(self.rec[1]): number of resources \n if asig[n][m] != '0': #unused resources are not shown\n i = m-3\n row = [asig[n][0], resources[i][0], asig[n][m]] \n asignation.append(row)\n \n return (activities, [], resources, asignation)",
"def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')",
"def load_trk(filename):\n trk_file = nib.streamlines.load(filename)\n return trk_file.streamlines, trk_file.header",
"def spectre_tsv3(f):\n \n skip = 0\n while True:\n try: \n wav, flux, dflux = np.loadtxt(f, skiprows = skip, unpack = True)\n \n except ValueError:\n # Si les première lignes ont un en-tête\n skip += 1\n \n else:\n break\n \n return wav,flux",
"def examples_from_file(path):\n examples = []\n\n # count total lines before loading\n total_lines = int(local('wc -l {}'.format(path), capture=True).split()[0])\n\n with codecs.open(path, 'r', encoding='utf-8') as f:\n for line in verboserate(f, desc='Reading data file.', total=total_lines):\n src, trg = line.strip().lower().split('\\t')\n src_words = src.split(' ')\n trg_words = trg.split(' ')\n assert len(src_words) > 0\n assert len(trg_words) > 0\n\n if use_diff:\n ex = EditExample.salient_diff(src_words, trg_words, free_set)\n else:\n ex = EditExample.whitelist_blacklist(src_words, trg_words)\n examples.append(ex)\n return examples",
"def load(fname):\r\n try:\r\n with open(fname, 'r') as f:\r\n V, H = (int(a) for a in next(f).split())\r\n W, i2w, w2i = np.zeros((V, H)), [], {}\r\n for i, line in enumerate(f):\r\n parts = line.split()\r\n word = parts[0].strip()\r\n w2i[word] = i\r\n W[i] = list(map(float, parts[1:]))\r\n i2w.append(word)\r\n return W, i2w, w2i, V, H\r\n except:\r\n print(\"Error: failing to load the model to the file\")",
"def load_file(path):\n with open(path, \"rb\") as f: # bsps are binary files\n byte_list = f.read() # stores all bytes in bytes1 variable (named like that to not interfere with builtin names\n header = load_header(byte_list)\n skin_names = [byte_list[header.ofs_skins + 64 * x:header.ofs_skins + 64 * x + 64].decode(\"ascii\", \"ignore\") for x in range(header.num_skins)]\n triangles = load_triangles(byte_list[header.ofs_tris:header.ofs_frames], header)\n frames = load_frames(byte_list[header.ofs_frames:header.ofs_glcmds], header)\n texture_coordinates = load_texture_coordinates(byte_list[header.ofs_st:header.ofs_tris], header)\n gl_commands = load_gl_commands(byte_list[header.ofs_glcmds:header.ofs_end])\n # print(header)\n # print(skin_names)\n # print(triangles)\n # print(frames)\n # print(texture_coordinates)\n for i in range(len(texture_coordinates)):\n texture_coordinates[i].s = texture_coordinates[i].s/header.skinwidth\n texture_coordinates[i].t = texture_coordinates[i].t / header.skinheight\n # print(texture_coordinates)\n # print(header.num_xyz)\n for i_frame in range(len(frames)):\n for i_vert in range((header.num_xyz)):\n frames[i_frame].verts[i_vert].v[0] = frames[i_frame].verts[i_vert].v[0]*frames[i_frame].scale.x+frames[i_frame].translate.x\n frames[i_frame].verts[i_vert].v[1] = frames[i_frame].verts[i_vert].v[1] * frames[i_frame].scale.y + frames[i_frame].translate.y\n frames[i_frame].verts[i_vert].v[2] = frames[i_frame].verts[i_vert].v[2] * frames[i_frame].scale.z + frames[i_frame].translate.z\n model = md2_object(header, skin_names, triangles, frames, texture_coordinates, gl_commands)\n return model",
"def sequence_loader(\n data_path: str,\n index_path: typing.Union[str, None],\n context_description: typing.Union[\n typing.List[str], typing.Dict[str, str], None\n ] = None,\n features_description: typing.Union[\n typing.List[str], typing.Dict[str, str], None\n ] = None,\n shard: typing.Optional[typing.Tuple[int, int]] = None,\n compression_type: typing.Optional[str] = None,\n) -> typing.Iterable[\n typing.Tuple[\n typing.Dict[str, np.ndarray], typing.Dict[str, typing.List[np.ndarray]]\n ]\n]:\n typename_mapping = {\n \"byte\": \"bytes_list\",\n \"float\": \"float_list\",\n \"int\": \"int64_list\"\n }\n\n record_iterator = tfrecord_iterator(\n data_path=data_path,\n index_path=index_path,\n shard=shard,\n compression_type=compression_type,\n )\n\n for record in record_iterator:\n example = example_pb2.SequenceExample()\n example.ParseFromString(record)\n\n context = extract_feature_dict(example.context, context_description, typename_mapping)\n features = extract_feature_dict(example.feature_lists, features_description, typename_mapping)\n\n yield context, features",
"def spinex_sec(infile, sequence):\n return np.loadtxt(infile, usecols=[7, 5, 6], skiprows=1).reshape((1, -1, 3))",
"def spinex_psi(infile, sequence):\n return np.loadtxt(infile, usecols=4, skiprows=1).reshape((1, -1, 1))",
"def _load_files_to_unaligned_seqs(\n *,\n path: os.PathLike,\n format: Optional[str] = None,\n moltype: Optional[str] = None,\n label_to_name: Optional[Callable] = None,\n parser_kw: Optional[dict] = None,\n info: Optional[dict] = None,\n ui=None,\n) -> SequenceCollection:\n\n file_names = list(path.parent.glob(path.name))\n seqs = [\n load_seq(\n fn,\n format=format,\n moltype=moltype,\n label_to_name=label_to_name,\n parser_kw=parser_kw,\n )\n for fn in ui.series(file_names)\n ]\n return make_unaligned_seqs(\n seqs,\n label_to_name=label_to_name,\n moltype=moltype,\n source=path,\n info=info,\n )",
"def load_data():\n x = np.genfromtxt(X_FILE, usecols=(0, 1))\n y = np.genfromtxt(Y_FILE, usecols=(0))\n\n return x, y",
"def load_examples(path: str) -> List['InputExample']:\n with open(path, 'rb') as fh:\n return pickle.load(fh)",
"def load_examples(path: str) -> List['InputExample']:\n with open(path, 'rb') as fh:\n return pickle.load(fh)",
"def load_examples(path: str) -> List['InputExample']:\n with open(path, 'rb') as fh:\n return pickle.load(fh)",
"def load_sequence(in_file: Path) -> List[np.ndarray]:\n result = []\n\n with open(in_file, \"r\") as f:\n for line in f:\n chunks = line.split()\n assert len(chunks) == 8 # relative path + quat + trans\n guid = chunks[0].split(\"/\")[0]\n frame_idx = int(chunks[0].split(\"-\")[-1])\n assert guid == in_file.stem\n\n if len(result) < frame_idx:\n _logger.warning(\n f\"Frames {len(result)}-{frame_idx} are missing from \"\n f\"{in_file}. Adding NaN poses instead.\"\n )\n while len(result) < frame_idx:\n result.append(np.full((4, 4), np.nan))\n\n pose_quat_t = [float(x) for x in chunks[1:]]\n if np.isfinite(pose_quat_t).all():\n pose = np.eye(4)\n pose[0:3, 0:3] = transforms3d.quaternions.quat2mat(pose_quat_t[0:4])\n pose[0:3, 3] = pose_quat_t[4:7]\n else:\n pose = np.full((4, 4), np.nan)\n\n result.append(pose)\n\n return result"
]
| [
"0.6521103",
"0.62880707",
"0.61003274",
"0.59700245",
"0.58200794",
"0.5782038",
"0.57089365",
"0.56952393",
"0.56798565",
"0.5662613",
"0.5611655",
"0.5573968",
"0.5550844",
"0.5512312",
"0.5477089",
"0.54759026",
"0.5463264",
"0.54625565",
"0.5447529",
"0.5445265",
"0.5423516",
"0.54216504",
"0.5395353",
"0.53943974",
"0.5378804",
"0.5372053",
"0.53715664",
"0.53715664",
"0.53715664",
"0.53477114"
]
| 0.7215341 | 0 |
Compute average KL divergence of all forecasters in a given time interval | def KL_apply(question):
if not question.empty:
average_forecast = question.groupby(
['Ordered.Bin.Number']
).Forecast.mean().reset_index()
forecasts = question.pivot_table(
values='Forecast',
index='Ordered.Bin.Number',
columns='Forecaster.ID'
).values
question_avg = average_forecast.Forecast.values.reshape(1, -1)
divergences = kullback_leibler(forecasts, question_avg)
return np.mean(divergences) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def kl_divergence(self) -> Tensor:\n return self.variational_strategy.kl_divergence().sum(dim=1).mean()",
"def avg_spike_frequency_abf(abf, epoch):\n p0 = abf.sweepEpochs.p1s[epoch]\n p1 = abf.sweepEpochs.p1s[epoch+1]\n t = abf.sweepX[p0:p1]\n V = abf.sweepY[p0:p1]\n return avg_spike_frequency(t, V)",
"def compute_divergence(self):\n d_tr_a = []\n d_te_a = []\n for k in self.synth_keys:\n d_tr_a.append(self.divergence('tr', k))\n d_te_a.append(self.divergence('te', k))\n\n training = np.mean(np.array(d_tr_a))\n testing = np.mean(np.array(d_te_a))\n return training, testing",
"def _cal_score_kl_divergence(self, h_mu, h_sigma, r_mu, r_sigma, t_mu, t_sigma):\n comp_sigma = h_sigma + r_sigma\n comp_mu = h_mu + r_mu\n trace_fac = (comp_sigma / t_sigma).sum(-1)\n mul_fac = ((t_mu - comp_mu) ** 2 / t_sigma).sum(-1)\n det_fac = (torch.log(t_sigma) - torch.log(comp_sigma)).sum(-1)\n return trace_fac + mul_fac + det_fac - self.hidden_size",
"def get_KL_divergence(self):\n KL_loss = 0\n if(self.Bayesian):\n for i in range(self.num_layers):\n KL_loss += getattr(self, 'LSTMCell%i'%(i+1)).get_KL_divergence()\n \n return KL_loss",
"def dishlist_avg_cal(n:list)->float:\r\n all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)",
"def AllFreAverageV(self,):\n \t\tv_array = self.data\n \t\taaverage_v = np.average(v_array[:,1])\n \t\tprint('Whole frequency average group velocity:\\nVw=',aaverage_v/1000,'km/s')\n \t\treturn",
"def calcAverage_g(model_type, N = 2, k = 1000):\n a = 0\n for i in range(k):\n hyp = model_type(N);\n a += hyp.w \n return a/k",
"def KLDivDF(sample_E, sample_T):\n res = [KLDiv(sample_E[col], sample_T[col]) for col in sample_E.columns]\n res = [v for v in res if not isinf(v)]\n return np.nanmean(res)",
"def kday_moving_average(x, k):\n if not isinstance(k, int):\n raise ValueError('k must be int.')\n # temp = np.append(np.zeros(k - 1), x)\n temp = np.append(np.nan*np.ones(k-1), x)\n y = np.convolve(temp, np.ones(k, dtype=int), 'valid') / k\n return y",
"def average_energy(power,events,borders,eventName):\n event_consider = events[events['eventName']==eventName].reset_index(drop=True)\n average = 0\n i = 0\n count = 0\n minValue = 10000\n maxValue = 0\n minAverage = 10000\n maxAverage = 0 \n while(i<len(event_consider)):\n date = time.mktime(datetime.strptime(event_consider['time'][i], \"%Y-%m-%d %H:%M:%S\").timetuple())\n start = str(datetime.fromtimestamp(date+borders[0]))\n end = str(datetime.fromtimestamp(date+borders[1]))\n values = power[(power['time']>=start)&(power['time']<=end)]['value']\n sum_values = sum(values)\n tot_values = len(values)\n if tot_values>0:\n if values.max() > maxValue:\n maxValue = values.max()\n if values.min() < minValue:\n minValue = values.min()\n if sum_values/tot_values > maxAverage:\n maxAverage = sum_values/tot_values\n if sum_values/tot_values < minAverage:\n minAverage = sum_values/tot_values\n average = average + sum_values/tot_values\n count += 1\n i += 1\n if count>0:\n average = average / count\n print(\"number of\", eventName ,\"in groudtruth and power=\",count)\n print(\"minValue=\",minValue,\"maxValue=\",maxValue)\n print(\"minAverage=\",minAverage,\"maxAverage=\",maxAverage)\n print(\"Final Av=\",average)\n return average\n else:\n print(\"Not values found in the range\")",
"def averageTime(self):\n \n pass",
"def gaver_stehfest(time, lap_func):\n def nCr(n, r):\n return math.factorial(n)/(math.factorial(r)*\n math.factorial(n-r))\n def a(k, n):\n summation = 0.\n for j in range((k+1)/2, min(k, n)+1):\n current_summation = float(pow(j, n+1))/float(math.factorial(n))\n current_summation *= nCr(n, j)\n current_summation *= nCr(2*j, j)\n current_summation *= nCr(j, k-j)\n summation += current_summation\n return summation*pow(-1, n+k)\n n = 7\n total_sum = a(1, n)*lap_func(1.*np.log(2.)/time)\n for k in range(2, 2*n+1):\n total_sum += a(k, n)*lap_func(k*np.log(2.)/time)\n return total_sum*np.log(2.)/time",
"def calculate_average_run_accuracy(self):\n overall_true_rate, true_positive_rate, true_negative_rate, false_positive_rate, false_negative_rate, true_positive_rate_cutoff, true_negative_rate_cutoff, \\\n false_positive_rate_cutoff, false_negative_rate_cutoff, unclassified_cutoff, matthews_correlation_coefficient, brier_score, auc_score, fit_time, hmeasure = [0] * 15\n balanced_accuracy_arr = []\n auc_arr = []\n hmeasure_arr = []\n brier_score_arr = []\n fit_time_arr = []\n mcc_arr = []\n true_positive_arr = []\n true_negative_arr = []\n false_positive_arr = []\n false_negative_arr = []\n\n count = 0\n for result_dictionary in self.errors:\n for z in range(len(result_dictionary[\"balanced_accuracy_arr\"])):\n overall_true_rate += result_dictionary[\"balanced_accuracy_arr\"][z]\n true_positive_rate += result_dictionary[\"true_positive_rate_arr\"][z]\n true_negative_rate += result_dictionary[\"true_negative_rate_arr\"][z]\n false_positive_rate += result_dictionary[\"false_positive_rate_arr\"][z]\n false_negative_rate += result_dictionary[\"false_negative_rate_arr\"][z]\n matthews_correlation_coefficient += result_dictionary[\"mcc_arr\"][z]\n auc_score += result_dictionary[\"auc_arr\"][z]\n brier_score += result_dictionary[\"brier_score_arr\"][z]\n fit_time += result_dictionary[\"fit_time_arr\"][z]\n hmeasure += result_dictionary[\"hmeasure_arr\"][z]\n count += 1\n\n true_positive_rate_cutoff += result_dictionary[\"avg_true_positive_rate_with_prob_cutoff\"]\n true_negative_rate_cutoff += result_dictionary[\"avg_true_negative_rate_with_prob_cutoff\"]\n false_positive_rate_cutoff += result_dictionary[\"avg_false_positive_rate_with_prob_cutoff\"]\n false_negative_rate_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n unclassified_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n balanced_accuracy_arr += result_dictionary[\"balanced_accuracy_arr\"]\n hmeasure_arr += result_dictionary[\"hmeasure_arr\"]\n auc_arr += result_dictionary[\"auc_arr\"]\n brier_score_arr += result_dictionary[\"brier_score_arr\"]\n fit_time_arr += result_dictionary[\"fit_time_arr\"]\n mcc_arr += result_dictionary[\"mcc_arr\"]\n true_positive_arr += result_dictionary[\"true_positive_rate_arr\"]\n true_negative_arr += result_dictionary[\"true_negative_rate_arr\"]\n false_positive_arr += result_dictionary[\"false_positive_rate_arr\"]\n false_negative_arr += result_dictionary[\"false_negative_rate_arr\"]\n\n avg_run_results = [None] * 31\n avg_run_results[0] = matthews_correlation_coefficient / float(count)\n avg_run_results[1] = brier_score / float(count)\n avg_run_results[2] = overall_true_rate / float(count)\n avg_run_results[3] = true_positive_rate / float(count)\n avg_run_results[4] = true_negative_rate / float(count)\n avg_run_results[5] = false_positive_rate / float(count)\n avg_run_results[6] = false_negative_rate / float(count)\n avg_run_results[7] = true_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[8] = true_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[9] = false_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[10] = false_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[11] = unclassified_cutoff / float(len(self.errors))\n avg_run_results[12] = fit_time / float(count)\n avg_run_results[14] = balanced_accuracy_arr\n avg_run_results[15] = auc_score / float(count)\n avg_run_results[16] = auc_arr\n avg_run_results[17] = brier_score_arr\n avg_run_results[18] = fit_time_arr\n avg_run_results[19] = 
mcc_arr\n avg_run_results[13] = self.calculate_std_deviation(balanced_accuracy_arr)\n avg_run_results[20] = self.calculate_std_deviation(mcc_arr)\n avg_run_results[21] = self.calculate_std_deviation(brier_score_arr)\n avg_run_results[22] = self.calculate_std_deviation(auc_arr)\n avg_run_results[23] = self.calculate_std_deviation(fit_time_arr)\n avg_run_results[24] = self.calculate_std_deviation(true_positive_arr)\n avg_run_results[25] = self.calculate_std_deviation(true_negative_arr)\n avg_run_results[26] = self.calculate_std_deviation(false_positive_arr)\n avg_run_results[27] = self.calculate_std_deviation(false_negative_arr)\n avg_run_results[28] = hmeasure / float(count)\n avg_run_results[29] = self.calculate_std_deviation(hmeasure_arr)\n avg_run_results[30] = hmeasure_arr\n\n return avg_run_results",
"def get_best_k_cv(air_quality_model):\n\n locations = air_quality_model.air_quality_locations\n time_series = air_quality_model.air_quality_time_series\n\n for each_location in locations:\n\n other_locations = [i for i in locations if i != each_location]\n training_time_series = time_series[other_locations]\n scaled_training_time_series = air_quality_model.scaler.transform(training_time_series)\n training_time_series_dropna = scaled_training_time_series.dropna().T\n\n # k means determine k\n distortions = []\n K = range(1, len(other_locations) + 1, 1)\n for k in K:\n kmeans = KMeans(n_clusters=k, max_iter=300).fit(training_time_series_dropna)\n # err = sum(np.min(cdist(training_time_series_dropna, kmeans.cluster_centers_, 'euclidean'), axis=1)) \\\n # / training_time_series_dropna.shape[0]\n\n # Sum of squared distances of samples to their closest cluster center\n err = kmeans.inertia_\n distortions.append(err)\n print(k, dict(zip(other_locations, kmeans.labels_)))\n print(each_location, k, 'err=', err)\n\n # Plot the elbow\n plt.figure(figsize=(15, 20))\n plt.plot(K, distortions, 'bx-')\n plt.xlabel('k')\n plt.ylabel('Distortion')\n plt.title(str(each_location) + ' The Elbow Method showing the optimal k')\n plt.show()",
"def integrated_clustering(t_all,y_all,num_of_days=500,period = 1440,trim=10,min_n_clusters = 4, max_n_clusters=10,hierarchical=0):\n\n\n\n all_seg_april = initial_disaggregate(t_all,y_all,num_of_days,period = period)\n \n ''' '''\n all_seg_april_normalized = [np.array(x[0])-np.mean(x[1]) for x in all_seg_april if len(x[1])==3]\n \n ''' filter the empty segments'''\n all_seg_april_normalized = [x for x in all_seg_april_normalized if len(x)>0]\n \n ''' clustering in different ranges will probably have a better result'''\n if hierarchical == 0:\n pass\n elif hierarchical ==1:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()>1000]\n else:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()<1000]\n \n ''' filter out the positive segments'''\n all_positive_seg_april_normalized = [x for x in all_seg_april_normalized if x.min()>0]\n \n \n all_seg_april_normalized_trim50 = extract_first_n(all_positive_seg_april_normalized, trim)\n cluster_average = []\n \n # find optimal clustering number using silhouette score\n \n optimal_dict = {}\n \n for n_clusters in range(min_n_clusters,max_n_clusters):\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n\n # sihouette score\n cluster_labels = y_pred\n sample_silhouette_values = silhouette_samples(all_seg_april_normalized_trim50, cluster_labels)\n \n silhouette_avg = silhouette_score(pd.DataFrame(all_seg_april_normalized_trim50), cluster_labels)\n\n optimal_dict[n_clusters] = silhouette_avg +(sample_silhouette_values.min()+sample_silhouette_values.max())/2\n \n # n_clusters will give us the optimal number of clusters\n n_clusters = max(optimal_dict.iteritems(), key=operator.itemgetter(1))[0]\n\n #print n_clusters\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n \n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n cluster_average_rank = np.argsort(cluster_average)[::-1]\n rank_map = {cluster_average_rank[i_cluster]:i_cluster for i_cluster in range(n_clusters)} # old index:new index\n\n y_pred_old = y_pred\n y_pred = [rank_map[x] for x in y_pred]\n all_seg_per_cluster = [[] for i in range(n_clusters) ]\n for i_seg in range(len(all_seg_april_normalized_trim50)):\n all_seg_per_cluster[y_pred[i_seg]].append(all_seg_april_normalized_trim50[i_seg])\n \n cluster_mean = [[] for i in range(n_clusters) ]\n cluster_std = [[] for i in range(n_clusters) ]\n for i_cluster in range(n_clusters):\n cluster_mean[ i_cluster ] = np.mean(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n cluster_std[ i_cluster ] = np.std(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n \n \n \n \n #cluster_mean_2 = cluster_mean[5:6]\n \n return cluster_mean,cluster_std,n_clusters,all_seg_per_cluster",
"def get_Kl_divergence(model1, model2, collection, lam, missing_val = 0.0001):\n smoot_m2 = {key: (1-lam)*model2.get(key, 0) + lam*collection.get(key, missing_val) for key in model1}\n\n divergence = sum([model1[key]*math.log(model1[key]/smoot_m2[key]) for key in model1])\n return divergence",
"def test_avg_l(self):\n u_spec = leabra.UnitSpec(g_bar_e=0.3, g_bar_l=0.3, g_bar_i=1.0)\n u = leabra.Unit(spec=u_spec)\n\n for _ in range(20):\n u.add_excitatory(1.0)\n u.calculate_net_in()\n u.cycle('minus')\n\n self.assertEqual(u.avg_l, 0.40)\n u.spec.update_avg_l(u)\n self.assertTrue(np.allclose(0.52, u.avg_l, rtol=0.1, atol=0.1))\n #TODO: verify that 0.52 is the value of emergent\n\n for _ in range(100):\n u.spec.update_avg_l(u)\n self.assertTrue(np.allclose(1.64, u.avg_l, rtol=0.1, atol=0.1))\n #TODO: verify that 1.64 is the value of emergent",
"def avg_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n\n try:\n raise_if_not_multiple_spikes(intervals)\n except NoMultipleSpikesException:\n return None\n\n avg_int = np.average(intervals)\n return 1/avg_int",
"def calcStatistics(model_type, k = 1000, x = np.linspace(-1,1,1000), N = 2):\n # av_g\n av_g = calcAverage_g(model_type)\n \n # create X\n X = np.vstack((np.ones(len(x)),x)).transpose()\n \n # bias, average across whole input space\n b = np.mean((np.dot(X, av_g)-np.sin(np.pi*x))**2)\n \n # variance, average across whole input space for each data set, then average\n # this average across multiple data sets\n v = 0\n for i in range(k):\n hyp = model_type(N) # generate new hypothesis function\n v += np.mean((np.dot(X, hyp.w) - np.dot(X, av_g))**2) # average this across the entire inpute spacede\n v = v/k\n \n return b, v",
"def interval_average():\r\n import statistics as st\r\n from tach_detect import tach_detect\r\n r = request.get_json()\r\n try:\r\n email = r[\"user_email\"]\r\n except KeyError:\r\n return jsonify(\"no email input\"), 400\r\n raise LookupError(\"no email input\")\r\n check_email = Check_For_User(email)\r\n if check_email.user_exists is False:\r\n return jsonify(str(email) + \" was not found. Please re-enter\"), 400\r\n raise LookupError(str(user_email) + \" was not found. Please re-enter\")\r\n try:\r\n input_date_time = r[\"date_time\"]\r\n except KeyError:\r\n return jsonify(\"no date entered\"), 400\r\n raise LookupError(\"no date entered\")\r\n try:\r\n validate_date_time(input_date_time)\r\n except (ValueError, TypeError) as error:\r\n return jsonify(\"date entered is invalid. Please re-type.\"), 400\r\n date_time = datetime.datetime(input_date_time[0], input_date_time[1],\r\n input_date_time[2], input_date_time[3],\r\n input_date_time[4], input_date_time[5],\r\n input_date_time[6])\r\n time_list = get_all_times(email)\r\n heart_rate_list = get_all_rates(email)\r\n interval_list = find_first_date(date_time, time_list, heart_rate_list)\r\n try:\r\n interval_average_post = st.mean(interval_list)\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n except st.StatisticsError:\r\n interval_average_post = heart_rate_list[len(heart_rate_list)-1]\r\n user = models.User.objects.raw({\"_id\": email}).first()\r\n curr_age = user.age\r\n tach_test = tach_detect(curr_age, interval_average_post)\r\n return_dict = {\r\n \"user_email\": email,\r\n \"heart_rate_average_since\": str(date_time),\r\n \"heart_rate_average\": interval_average_post,\r\n \"is_heart rate_tachycardic\": str(tach_test)\r\n }\r\n return jsonify(return_dict), 200",
"def KL_divergence(value_counts1, value_counts2):\n divergence = 0\n s1 = sum([value_counts1[value] for value in value_counts1])\n s2 = sum([value_counts2[value] for value in value_counts2])\n for value in set(value_counts1).union(value_counts2):\n assert(value in value_counts1 or value in value_counts2)\n if value not in value_counts1:\n s1 += KL_SMOOTHING\n if value not in value_counts2:\n s2 += KL_SMOOTHING\n for value in set(value_counts1).union(value_counts2):\n v1 = v2 = KL_SMOOTHING\n if value in value_counts1:\n v1 = value_counts1[value]\n if value in value_counts2:\n v2 = value_counts2[value]\n v1 = float(v1) / s1\n v2 = float(v2) / s2\n divergence += v1 * math.log(v1 / v2)\n if divergence > math.e:\n divergence = math.e\n return divergence",
"def grubbs(timeseries):\r\n\r\n series = scipy.array([x[1] for x in timeseries])\r\n stdDev = scipy.std(series)\r\n mean = np.mean(series)\r\n tail_average = tail_avg(timeseries)\r\n z_score = (tail_average - mean) / stdDev\r\n len_series = len(series)\r\n threshold = scipy.stats.t.isf(.05 / (2 * len_series) , len_series - 2)\r\n threshold_squared = threshold * threshold\r\n grubbs_score = ((len_series - 1) / np.sqrt(len_series)) * np.sqrt(threshold_squared / (len_series - 2 + threshold_squared))\r\n\r\n return z_score > grubbs_score",
"def pollster_errors(pollster_predictions, state_edges_actual):\r\n\r\n totalAverage = {} #Create an empty dictionary\r\n\r\n for k in pollster_predictions:\r\n states = pollster_predictions[k]\r\n for j in states:\r\n if j in state_edges_actual: \r\n average = average_error(pollster_predictions[k], state_edges_actual)\r\n totalAverage[k] = average \r\n #Map each pollster to its calculated average error of each state\r\n\r\n return totalAverage",
"def kl_divergence(self):\n return self._kl_divergence_func",
"def calc_average_load (self):\n #~ self.generation = self.forecast.generation_by_type['generation diesel']\\\n #~ [self.start_year]\n self.average_load = \\\n self.forecast.yearly_average_diesel_load.ix[self.start_year]",
"def error_in_assigned_energy(predictions, ground_truth):\n errors = {}\n both_sets_of_meters = iterate_through_submeters_of_two_metergroups(\n predictions, ground_truth)\n for pred_meter, ground_truth_meter in both_sets_of_meters:\n sections = pred_meter.good_sections()\n ground_truth_energy = ground_truth_meter.total_energy(sections=sections)\n predicted_energy = pred_meter.total_energy(sections=sections)\n errors[pred_meter.instance()] = np.abs(ground_truth_energy - predicted_energy)\n return pd.Series(errors)",
"def kl_divergence(mu, logvar):\n klds = -0.5*(1 + logvar - mu.pow(2) - logvar.exp())\n total_kld = klds.sum(1).mean(0, True)\n dimension_wise_kld = klds.mean(0)\n mean_kld = klds.mean(1).mean(0, True)\n\n return total_kld, dimension_wise_kld, mean_kld",
"def coldaverage( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n avenames = names # create an output list to average\n\n# assume only a limited range of galactic latitudes are available\n# not range above +/-60.\n use60Range = False\n minGlat = 90. # initialize to extremea\n maxGlat = -90.\n maxEl = -90.\n minEl = 90.\n ncold = 0\n\n # for all input files\n for filename in names:\n\n parts = filename.split('/')\n nparts = len(parts)\n if nparts == 1:\n aname = parts[0]\n else:\n aname = parts[nparts-1]\n\n parts = aname.split('.')\n nparts = len(parts)\n if nparts < 2:\n print 'File is not an astronomy file: ',filename\n continue\n else:\n extension = parts[nparts-1]\n\n extension = extension.upper()\n if extension != 'AST': # speed up by only looking at astronomy files\n continue\n \n rs.read_spec_ast(filename) # An observation, read values\n\n if rs.telel < 0: # only working with observations, skip elevation <= 0.\n continue\n\n maxGlat = max( rs.gallat, maxGlat)\n minGlat = min( rs.gallat, minGlat)\n maxEl = max( rs.telel, maxEl)\n minEl = min( rs.telel, minEl)\n # end for all files loop, looking for max el and latitude ranges\n\n # if any high galactic latitudes, use only above +/-60d \n if minGlat < -60. or maxGlat > 60.:\n minGlat = -60.\n maxGlat = 60.\n else: # else no high galactic latitude data\n # use highest galactic latitudes - +/-5.degrees\n if -minGlat > maxGlat: # if negative latitudes higher\n minGlat = minGlat + 5.\n maxGlat = 90.\n else: # else positive latitudes higher\n maxGlat = maxGlat - 5.\n minGlat = -90.\n\n # only use the elevations above 60 degrees, if any\n if maxEl > 60.:\n maxEl = 60.\n else:\n maxEl = maxEl - 10. #else must use highest elevations available\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if rs.telel < maxEl:\n continue\n\n if rs.gallat > maxGlat or rs.gallat < minGlat:\n avenames[ncold] = filename\n ncold = ncold + 1\n # end of for all files loop\n\n ncold, cold = average( avenames[0:ncold]) # now use generic program for averages\n if ncold < 1:\n print 'No Cold load files; can not calibrate!'\n exit()\n\n return ncold, cold, minEl, maxEl",
"def compute_average(self, error=None):\n\n nbjobs = len(self)\n if not nbjobs:\n return\n max_xsec = max(one.xsec for one in self)\n min_xsec = min(one.xsec for one in self)\n self.axsec = sum([one.axsec for one in self]) / nbjobs\n self.xsec = sum([one.xsec for one in self]) /nbjobs\n self.xerrc = sum([one.xerrc for one in self]) /nbjobs\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self])) /nbjobs\n if error:\n self.xerrc = error\n self.xerru = error\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = 0#sum([one.nw for one in self])\n self.maxit = 0#len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = sum([one.luminosity for one in self])\n self.ysec_iter = []\n self.yerr_iter = []\n self.th_maxwgt = 0.0\n self.th_nunwgt = 0 \n for result in self:\n self.ysec_iter+=result.ysec_iter\n self.yerr_iter+=result.yerr_iter\n self.yasec_iter += result.yasec_iter\n self.eff_iter += result.eff_iter\n self.maxwgt_iter += result.maxwgt_iter\n\n #check full consistency\n onefail = False\n for one in list(self):\n if one.xsec < (self.xsec - 25* one.xerru):\n if not onefail:\n logger.debug('multi run are inconsistent: %s < %s - 25* %s: assign error %s', one.xsec, self.xsec, one.xerru, error if error else max_xsec-min_xsec)\n onefail = True\n self.remove(one)\n if onefail:\n if error:\n return self.compute_average(error)\n else:\n return self.compute_average((max_xsec-min_xsec)/2.)"
]
| [
"0.62259936",
"0.5951375",
"0.58842665",
"0.58284074",
"0.5805377",
"0.57114524",
"0.5686495",
"0.55966306",
"0.55640596",
"0.553133",
"0.55185944",
"0.55090743",
"0.54649633",
"0.54555833",
"0.54146105",
"0.5384558",
"0.5382205",
"0.53703797",
"0.536366",
"0.5356504",
"0.53502005",
"0.5342721",
"0.5332758",
"0.5297893",
"0.52923137",
"0.5242022",
"0.5236192",
"0.52346236",
"0.5228656",
"0.522048"
]
| 0.63926786 | 0 |
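The KL_apply record above relies on a kullback_leibler(forecasts, question_avg) helper that the record never defines. A minimal sketch of what it is assumed to compute — one KL divergence per forecaster, of that forecaster's binned distribution against the question average — is given below; the array orientation, the renormalisation, and the clipping constant are assumptions, not taken from the source.

import numpy as np

def kullback_leibler(forecasts, reference, eps=1e-12):
    # forecasts: (num_bins, num_forecasters), as produced by the pivot_table above
    # reference: (1, num_bins) average forecast; returns one divergence per forecaster
    p = np.clip(np.asarray(forecasts, dtype=float).T, eps, None)
    q = np.clip(np.asarray(reference, dtype=float), eps, None)
    p = p / p.sum(axis=1, keepdims=True)   # renormalise each forecaster's bins
    q = q / q.sum(axis=1, keepdims=True)   # renormalise the reference distribution
    return np.sum(p * np.log(p / q), axis=1)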
Set the power to the specified value (in mW) | def set_power(self, power):
print('Setting santec power to %.4f mW' % power)
self.santec1.write("LP %.2f" % power)
self.santec2.write("LP %.2f" % power)
self.santec3.write("LP %.2f" % power)
self.santec4.write("LP %.2f" % power) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_power(self, value):\n self.write(\":POW {}W\".format(value))",
"def power(self, value: int):\n self._power = value",
"def set_power(self, dbm=-30):\r\n self.write(\"POW \"+str(dbm))",
"def set_power(self, dbm=-30):\r\n _debug('simq03b_api.set_power')\r\n \r\n self.write(\"POWer \"+str(dbm))",
"def set_power(self, dbm=-30):\r\n self.write(\"SOURce1:POWer:POWer \"+str(dbm))",
"def set_power(self, dbm=-30):\r\n self.write(\"SOURce1:POWer:POWer \"+str(dbm))",
"def set_power_management(value: int) -> None:",
"def set_power(power_W):\n if power_W > 0.120 : power_W = 0.120\n elif power_W < 0. : power_W = 0.\n if not (\"OK\" in cmd(\"cp\")):\n print(answer)\n shutdown()\n exit()\n if not (\"OK\" in cmd(\"p {:.4f}\".format(float(power_W)))):\n print(answer)\n shutdown()\n exit()",
"def set_power(self, dbm=-30):\r\n self.p = dbm",
"def _set_power(self, value: str):\n if value == STATE_ON:\n self.state[1] = self.state[1][:2] + '1' + self.state[1][3:]\n\n if value == STATE_OFF:\n self.state[1] = self.state[1][:2] + '0' + self.state[1][3:]",
"def get_setPower(self):\n self.read(\":POW?\")",
"def set_power(self, dbm=-30):\r\n return self._api.set_power(dbm)",
"def set_power_dbm(self, power=None):\n if power is None:\n power = self.def_power\n self.instr.write('L1 ' + str(power + ' DM'))\n time.sleep(self.sleep_time)",
"def set_powers(self, power_1, power_2):\n pass",
"def setPowerFromDensity(self):\n self.p.power = self.p.powerDensity * self.getHMMass()",
"def set_power(self, power):\n x = 0\n if power > 100:\n power = 100\n elif power < 0:\n power = 0\n if power != 0:\n while (self.__rpm < 100) and x < 3:\n time.sleep(1)\n x += 1\n if x > 3:\n print(\"Fan doesn't spinn!\")\n return\n self.__pow = power",
"def power(self, power):\n\n self._power = power",
"def _set_power(self, power: any) -> None:\n\n self.set_power(power, inplace=True)",
"def _number_dbm_changed(self, *a):\r\n self.api.set_power(self.number_dbm.get_value())",
"def set_power_unit(self, power_unit: PowerUnit) -> None:\n\n # 0: Current power units are dBm. 1: Current power units are Watts.\n if power_unit == PowerUnit.dBm:\n unit_argument = int(0)\n elif power_unit == PowerUnit.W:\n unit_argument = int(1)\n else:\n raise ValueError(\"power unit {} is not supported on this device\".format(power_unit))\n\n #:SENSe[n][:CHANnel[m]]:POWer:UNIT/?\n self._inst.write(\"SENS:POW:UNIT {}\".format(unit_argument))",
"def Incrpower(self, increment):\n self.power += increment",
"def set_power(self, power):\n\n return self._service.exposed_set_power(power)",
"def set_power_state(self, node, power_state):",
"def TL_power(self,power):\n self.write(self.headStr('TL')+'TPDB %d',power)",
"def setWeight(self, w):\n self._W = w",
"def get_power(self):\r\n x = self.query('POW?')\r\n if x == None: return None\r\n return float(x)",
"def set_W0_unit(self, value):\n if self.lf_W0.text() != \"\":\n self.set_W0() # Update for deg if needed and call comp_output\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()",
"def set_W0(self):\n if self.c_W0_unit.currentIndex() == 0: # Rad\n self.slot.W0 = self.lf_W0.value()\n else:\n self.slot.W0 = self.lf_W0.value() / 180 * pi\n self.w_out.comp_output()\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()",
"def powerIP(self,power):\n np.power(self.t, power, out=self.t)\n return self",
"def get_power(self):\r\n return self.p"
]
| [
"0.8348846",
"0.7548796",
"0.74613696",
"0.7346844",
"0.7324469",
"0.7324469",
"0.725705",
"0.72232807",
"0.7219513",
"0.7158136",
"0.7115584",
"0.7085621",
"0.6954798",
"0.6945102",
"0.6909162",
"0.68312705",
"0.6814112",
"0.6677769",
"0.66506827",
"0.6535626",
"0.65175855",
"0.6415025",
"0.64147466",
"0.62988526",
"0.6199238",
"0.6182564",
"0.6143755",
"0.61410326",
"0.61267376",
"0.61171246"
]
| 0.75894326 | 1 |
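A side note on units: the set_power record above takes its argument in mW, while several of the negatives listed with it pass dBm. The two scales are related by 10·log10 (dBm is decibels referenced to 1 mW), sketched here for reference:

import math

def mw_to_dbm(power_mw):
    # valid only for power_mw > 0
    return 10.0 * math.log10(power_mw)

def dbm_to_mw(power_dbm):
    return 10.0 ** (power_dbm / 10.0)

So, for example, 1 mW is 0 dBm and 0.01 mW is -20 dBm.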
Gets pictures from Curiosity Rover on a given Martian sol, with a certain camera. There may be multiple pages. | def get_rover_pics(sol, camera=None, page=1):
#Validation
valid_cameras = [
'FHAZ', 'RHAZ', 'MAST', 'CHEMCAM', 'MAHLI',
'MARDI', 'NAVCAM', 'PANCAM', 'MINITES'
]
if camera and camera not in valid_cameras:
raise Exception('Not a valid camera!')
params = {
'sol': sol,
'page': page,
}
args = {
'endpoint_postfix': 'mars-photos/api/v1/rovers/curiosity/photos?',
'params': params
}
api_call = Nasa(args)
api_call.place_call()
return api_call.data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_photo(mesh, movement, resolution, cmap, plotter, camera, title=None, title_location=\"upper_edge\",\n background_photos=None,cam_noise_lambda=None, background_scale=1, title_color=\"black\"):\n return Mesh.get_many_photos(mesh, movement, resolution, cmap,\n plotter, [camera], title, title_location, background_photos=background_photos,cam_noise_lambda=cam_noise_lambda,\n background_scale=background_scale, title_color=title_color)[0]",
"def page18(self):\n result = request1801.GET('/Cars_Sample_App/cars.do' +\n '?query=' +\n self.token_query +\n '&mid=' +\n self.token_mid)\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'car'\n # 4 different values for token_cid found in response; the first matched\n # the last known value of token_cid - don't update the variable.\n\n grinder.sleep(98)\n request1802.GET('/Cars_Sample_App/images/cars/1.jpg')\n\n return result",
"def page7(self):\n result = request701.GET('/Cars_Sample_App/cars.do' +\n '?query=' +\n self.token_query +\n '&mid=' +\n self.token_mid)\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'car'\n # 3 different values for token_cid found in response; the first matched\n # the last known value of token_cid - don't update the variable.\n\n grinder.sleep(95)\n request702.GET('/Cars_Sample_App/images/cars/1.jpg')\n\n return result",
"def get_many_photos(mesh, movement, resolution, cmap, plotter, camera, title=None, title_location=\"upper_edge\",\n background_photos=None, background_scale=1, title_color=\"black\", cam_noise_lambda=None):\n to_return = np.zeros(shape=(len(camera), resolution[1], resolution[0], 4))\n num_of_mesh = len(mesh)\n if background_photos:\n plotter.add_background_image(random.choice(background_photos), scale=background_scale)\n if cam_noise_lambda:\n cam_noise = np.zeros((len(camera), 3, 3))\n cam_noise[:,0] += np.random.normal(0, cam_noise_lambda[0], (len(camera), 3))\n cam_noise[:,1] += np.random.normal(0, cam_noise_lambda[1], (len(camera), 3))\n cam_noise[:,2] += np.random.normal(0, cam_noise_lambda[2], (len(camera), 3))\n camera = np.array(camera) + cam_noise\n\n if num_of_mesh == 1:\n mesh = [mesh]\n for i in range(num_of_mesh):\n if not mesh[i].texture:\n plotter.add_mesh(mesh[i].pv_mesh, cmap=cmap,\n name='get_photo_' + str(i))\n else:\n plotter.add_mesh(mesh[i].pv_mesh, texture=mesh[i].texture, name='get_photo_mesh_' + str(i))\n plotter.update_coordinates(movement[i], mesh=mesh[i].pv_mesh)\n if title:\n plotter.add_text(title, position=title_location, font_size=10, color=title_color, name=\"title\", shadow=True)\n plotter.set_background(color=\"white\")\n plotter.show(auto_close=False, window_size=resolution)\n for idx, cam in enumerate(camera):\n plotter.set_position(cam[0])\n plotter.set_focus(cam[1])\n plotter.set_viewup(cam[2])\n depth = plotter.get_image_depth(fill_value=None)\n depth = np.abs(depth)\n screen = plotter.screenshot(window_size=resolution)\n screen = screen / 255\n to_return[idx] = np.append(screen, depth.reshape(resolution[1], resolution[0], 1), axis=-1)\n if background_photos:\n plotter.remove_background_image()\n return np.asarray(to_return, np.float32)",
"def get_sim_images(self, urdf_file, camera_pose_path):\n self.load_urdf(urdf_file, random_pose=False)\n # self.get_plane()\n # self.change_texture(self.plane_id)\n # self.change_texture(self.object_id)\n\n self.create_camera()\n self.from_camera_pose(camera_pose_path)\n self.step(1)\n\n self.get_bgr()\n self.get_seg()\n\n if self.get_object_mask(self.object_id) is None:\n return False\n\n self.get_object_depth()\n self.crop(padding=10, random=False)\n\n print('sim img')\n\n return self.bgr, self.depth",
"def test_photos_seq(pre_pop_transaction, rover_name, sol, camera):\n from mars_street_view.models import Photo\n data = Photo.get_rov_sol(rover_name, sol)\n photos_by_cam = data.get('photos_by_cam', {})\n photos = photos_by_cam.get(camera, [])\n urls_from_method = [photo.img_src for photo in photos]\n prev_url = ''\n for url in urls_from_method:\n assert url > prev_url\n prev_url = url",
"def parse_cameras(number_of_cameras: int,\n nvm_content: List[str],\n offset: int,\n camera_id_offset: int,\n filter_list: Optional[Set[str]],\n nvm_images_path: str,\n cameras: kapture.Sensors,\n images: kapture.RecordsCamera,\n trajectories: Optional[kapture.Trajectories]) -> List[str]:\n image_idx_to_image_name = []\n # parse all cameras\n for i in range(0, number_of_cameras):\n line = nvm_content[i + offset].split()\n timestamp = i + camera_id_offset\n camera_id = f'sensor{timestamp}'\n image_file_name = line[0]\n image_idx_to_image_name.append(image_file_name)\n if filter_list is not None and image_file_name not in filter_list:\n # file_name is not in the list, do not add it\n continue\n\n focal_length = float(line[1])\n quaternion_wxyz = quaternion.from_float_array([float(v) for v in line[2:6]])\n camera_center = np.array([float(v) for v in line[6:9]])\n # https://github.com/colmap/colmap/blob/67e96894d4beed7cc93f1c0755a98d3664f85e63/src/base/reconstruction.cc#L891\n radial_distortion = -float(line[9]) # SIGN !\n\n try:\n # lazy open\n with Image.open(path.join(nvm_images_path, image_file_name)) as im:\n width, height = im.size\n except (OSError, PIL.UnidentifiedImageError):\n # It is not a valid image: skip it\n logger.info(f'Skipping invalid image file {image_file_name}')\n continue\n\n translation = - np.matmul(quaternion.as_rotation_matrix(quaternion_wxyz), camera_center)\n pose = kapture.PoseTransform(quaternion_wxyz, translation)\n\n camera = kapture.Camera(MODEL, [width, height, focal_length, width / 2, height / 2, radial_distortion])\n cameras[camera_id] = camera\n\n images[(timestamp, camera_id)] = image_file_name\n if trajectories is not None:\n trajectories[(timestamp, camera_id)] = pose\n return image_idx_to_image_name",
"def scrape_hemispheres():\n url_base = \"https://astrogeology.usgs.gov\"\n url = url_base+\"/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n browser = init_browser()\n\n browser.visit(url)\n time.sleep(3)\n\n # Scrape page into Soup\n html = browser.html\n soup = bs(html, \"html.parser\")\n # we look for the div describing the images\n results = soup.find_all('div', class_='description')\n# hemisphere_image_urls = []\n hemi_dico = {}\n ii = 1\n for rr in results:\n # grab the tile of the picture\n title = rr.find('h3').text \n # url to visit to get the full size imge\n url_for_pict = url_base + rr.find('a')['href']\n # go to the page where we can find the full size pict\n browser.visit(url_for_pict)\n # sleep a little bit\n time.sleep(3)\n # Scrape into soup\n html_pic = browser.html\n soup_pic = bs(html_pic, 'html.parser')\n # look for the link\n res_pic = soup_pic.find('img', class_=\"wide-image\")\n url_img = url_base+res_pic['src']\n# # Append a dict with the scraped variable in the list\n# hemisphere_image_urls.append({\"title\":title,\n# \"img_url\": url_img})\n # I found easier to manage in the html to have a dictionnary\n hemi_dico['title'+str(ii)] = title\n hemi_dico['img_url'+str(ii)] = url_img\n ii += 1 \n# break \n \n browser.quit()\n\n\n return hemi_dico",
"def page2(self):\n result = request201.GET('/Cars_Sample_App/cars.do' +\n '?query=' +\n self.token_query +\n '&mid=' +\n self.token_mid)\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'car'\n # 4 different values for token_cid found in response, using the first one.\n self.token_cid = \\\n httpUtilities.valueFromBodyURI('cid') # '2'\n\n grinder.sleep(103)\n request202.GET('/Cars_Sample_App/images/cars/1.jpg')\n\n return result",
"def camera():\n return render_template('home/cam.html')",
"def get_images(self, page_number):",
"def main():\n camera = picamera.PiCamera()\n camera.resolution = (RESOLUTIONX, RESOLUTIONY)\n camera.iso = 800\n time.sleep(2)\n while True:\n camera.capture('current-image.jpg')\n adapt_steering(navigation.get_xposition('current-image.jpg'))\n time.sleep(0.4)",
"def index():\n\n # list with url for pictures\n pic_url_list = []\n\n # default resolution\n x_resolution = 2048\n y_resolution = 2048\n\n # converting flask type od dict to default python dict\n request_arguments = request.args.to_dict(flat=False)\n\n # check if optional argument (resolution) exists\n if 'rozdzielczosc' in request_arguments:\n # splitting x and y resolution values by 'x' and mapping it on int\n x_resolution, y_resolution = map(int, request_arguments['rozdzielczosc'][0].split('x'))\n\n if 'zdjecia' in request_arguments:\n pic_url_list = request_arguments['zdjecia'][0].split(',')\n\n # check if optional argument (random) exists, if so shuffle it\n if 'losowo' in request_arguments and request_arguments['losowo'][0] == '1':\n shuffle(pic_url_list)\n\n # call function which glues images together\n try:\n mosaic = make_mosaic(pic_url_list, x_resolution, y_resolution)\n mosaic.save(basepath + '/static/img/mosaic.jpeg', 'JPEG')\n return render_template('mosaic.html')\n except ValueError as e:\n return render_template('mosaic.html', message=str(e))\n except requests.exceptions.ConnectionError:\n return render_template('mosaic.html', message='Wrong image url passed or no internet access')",
"def test_photos_sorted(pre_pop_transaction, rover_name, sol, camera):\n from mars_street_view.models import Photo\n data = Photo.get_rov_sol(rover_name, sol)\n photos_by_cam = data.get('photos_by_cam', {})\n photos = photos_by_cam.get(camera, [])\n urls_from_method = [photo.img_src for photo in photos]\n assert urls_from_method == list(sorted(urls_from_method))",
"def read_cameras(self):\n for camera in self.camlist:\n image = camera.cam.read()\n if camera.vflip:\n image = cv2.flip(image, -1)\n if camera.resize_width:\n image = imutils.resize(image, width=camera.width_pixels)\n camera.cam_q.append(image)\n for detector in camera.detectors:\n self.run_detector(camera, image, detector)",
"def page11(self):\n self.token_query = \\\n 'search'\n result = request1101.POST('/Cars_Sample_App/search.do' +\n '?query=' +\n self.token_query,\n ( NVPair('criteria', 'Aston'),\n NVPair('x', '46'),\n NVPair('y', '19'), ),\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Content-Type', 'application/x-www-form-urlencoded'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/search.do'), ))\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'car'\n # 3 different values for token_cid found in response; the first matched\n # the last known value of token_cid - don't update the variable.\n\n grinder.sleep(95)\n request1102.GET('/Cars_Sample_App/images/cars/1.jpg', None,\n ( NVPair('Accept', '*/*'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/search.do?query=search'), ))\n\n return result",
"def numberOfCamera():\n return numCams",
"def get_vr_photos(self, count = 30, page = 1):\n uri = 'photos/vr'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)",
"def gen(camera):\n while True:\n # frame_findline = camera.get_frame()\n frame_findline, center_Pos1, center_Pos2 = camera.get_frame()\n frame_findline = cv2.line(frame_findline, (center_Pos1, 440), (center_Pos2, 380), (255,100,0), 5)\n\n frame = cv2.imencode('.jpg', frame_findline)[1].tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')",
"def take_picture_from_camera(network_id: str, camera_serial: str) -> dict:\n data = api.get_camera_snapshot(network_id, camera_serial)\n if data.status_code != 202:\n # Mock data\n return {\n \"url\": \"https://spn4.meraki.com/stream/jpeg/snapshot/b2d123asdf423qd22d2\",\n \"expiry\": \"Access to the image will expire one day\"\n }\n\n return data.content",
"def cozmo_app(coz_conn):\n coz = coz_conn.wait_for_robot()\n coz.camera.image_stream_enabled = True\n coz_ros = CozmoRos(coz)\n coz_ros.run()",
"def getImage(cam):\n\n return cam.getImage()",
"def get_many_noisy_photos(mesh, movement, resolution, cmap, plotter, camera, title=None, title_location=\"upper_edge\",\n background_photos=None, background_scale=1, title_color=\"black\", cam_noise_lambda=None,\n texture_params=(255/2, 155, (1000, 1000, 3))):\n to_return = np.zeros(shape=(len(camera), resolution[1], resolution[0], 4))\n num_of_mesh = len(mesh)\n if background_photos:\n plotter.add_background_image(random.choice(background_photos), scale=background_scale)\n if cam_noise_lambda:\n cam_noise = np.zeros((len(camera), 3, 3))\n cam_noise[:,0] += np.random.normal(0, cam_noise_lambda[0], (len(camera), 3))\n cam_noise[:,1] += np.random.normal(0, cam_noise_lambda[1], (len(camera), 3))\n cam_noise[:,2] += np.random.normal(0, cam_noise_lambda[2], (len(camera), 3))\n camera = np.array(camera) + cam_noise\n\n if num_of_mesh == 1:\n mesh = [mesh]\n for i in range(num_of_mesh):\n tex = np.random.normal(texture_params[0], texture_params[1], texture_params[2]).astype(np.uint8)\n tex[np.where(tex > 255)] = 255\n tex[np.where(tex < 0)] = 0\n tex = pv.numpy_to_texture(tex)\n mesh[i].pv_mesh.texture_map_to_plane(inplace=True)\n plotter.add_mesh(mesh[i].pv_mesh, texture=tex, name='get_photo_mesh_' + str(i))\n plotter.update_coordinates(movement[i], mesh=mesh[i].pv_mesh)\n if title:\n plotter.add_text(title, position=title_location, font_size=10, color=title_color, name=\"title\", shadow=True)\n plotter.set_background(color=\"white\")\n plotter.show(auto_close=False, window_size=resolution)\n for idx, cam in enumerate(camera):\n plotter.set_position(cam[0])\n plotter.set_focus(cam[1])\n plotter.set_viewup(cam[2])\n depth = plotter.get_image_depth(fill_value=None)\n depth = np.abs(depth)\n screen = plotter.screenshot(window_size=resolution)\n screen = screen / 255\n to_return[idx] = np.append(screen, depth.reshape(resolution[1], resolution[0], 1), axis=-1)\n if background_photos:\n plotter.remove_background_image()\n return np.asarray(to_return, np.float32)",
"def test_camera(self, camera):\n dev = list()\n for room in self.rooms:\n for device in room[\"devices\"]:\n dev.append(device)\n return Response(self.gen_testcamera(dev[int(camera)]),\n mimetype='multipart/x-mixed-replace; boundary=frame')",
"def readImages(self):\r\n\r\n #Read the file camera.csv for the image file name\r\n lines = [line.strip() for line in open(self.cameraFile)]\r\n i = 0;\r\n\tself.centers = []\r\n\tself.lefts = []\r\n\tself.rights = []\r\n\r\n for line in lines:\r\n info = line.split(',')\r\n \r\n\r\n if info[0] == 'seq':\r\n i += 1\r\n continue\r\n \r\n if info[4] == 'left_camera':\r\n self.lefts.append(info)\r\n if info[4] == 'center_camera':\r\n self.centers.append(info)\r\n if info[4] == 'right_camera':\r\n self.rights.append(info)\r\n i += 1\r\n\r\n print \"Total Frames: %d \" % (len(self.centers))",
"def cam():\n\treturn Response(gen(camera),\n\t\t\t\t\tmimetype='multipart/x-mixed-replace; boundary=frame'), 200",
"def gen():\n template = cv2.imread('moi.jpg',0)\n w, h = template.shape[::-1]\n\n meth = 'cv2.TM_CCOEFF_NORMED'\n method = eval(meth)\n\n cap = cv2.VideoCapture(0)\n if (cap.isOpened() == False):\n print(\"Unabl e to read camera feed\")\n return\n while(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n cv2.imwrite('t.jpg', frame)\n yield (b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + open('t.jpg', 'rb+').read() + b'\\r\\n')\n time.sleep( 5 )\n\n cap.release()",
"def getFrames():\n\t\tfor cam in Camera.CAMERAS: cam.getFrame()",
"def gen(camera):\n \n while True:\n \n \n \n frame = camera.get_frame()\n \n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')",
"def mri(PATH_IMG):\r\n logging.info('Running: example_camera (MorphACWE)...')\r\n # Load the image.\r\n img = imread(PATH_IMG)/255.0\r\n # Callback for visual plotting\r\n callback = visual_callback_2d(img)\r\n # Morphological Chan-Vese (or ACWE)\r\n img= ms.morphological_chan_vese(img, 35,\r\n smoothing=3, lambda1=1, lambda2=1,\r\n iter_callback=callback)\r\n return img"
]
| [
"0.6081622",
"0.6015538",
"0.6012094",
"0.58689016",
"0.5781275",
"0.5739771",
"0.5702185",
"0.5680316",
"0.556765",
"0.55674064",
"0.5559166",
"0.5465669",
"0.5456309",
"0.5448997",
"0.54273283",
"0.54250425",
"0.54247284",
"0.5424507",
"0.5420435",
"0.5417016",
"0.5371876",
"0.5364691",
"0.53636575",
"0.5360999",
"0.53448665",
"0.532389",
"0.5323514",
"0.5302338",
"0.5268578",
"0.525916"
]
| 0.74917895 | 0 |
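The get_rover_pics record above builds a Nasa(args) wrapper and calls place_call(), neither of which appears in the record. A sketch consistent with how the record uses it (an endpoint_postfix string, a params dict, and a data attribute filled by place_call) might look like the following; the base URL, the api_key handling, and the use of the requests library are assumptions rather than the record's actual implementation.

import requests

class Nasa:
    BASE_URL = "https://api.nasa.gov/"

    def __init__(self, args, api_key="DEMO_KEY"):
        self.endpoint_postfix = args["endpoint_postfix"]
        self.params = dict(args["params"], api_key=api_key)  # merge in the API key
        self.data = None

    def place_call(self):
        # issue the GET request and keep the decoded JSON on .data,
        # which is what get_rover_pics returns
        response = requests.get(self.BASE_URL + self.endpoint_postfix, params=self.params)
        response.raise_for_status()
        self.data = response.json()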
Return a new Cursor Object using the connection. If the database does not provide a direct cursor concept, the module will have to emulate cursors using other means to the extent needed by this specification. [4] | def cursor(self):
if self._closed:
raise Error('The connection to the database has been closed.')
return Cursor(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_cursor(self):\r\n cursor = self.connection.cursor()\r\n return cursor",
"def get_cursor():\n cur = conn.cursor(cursor_factory=DictCursor)\n return cur",
"def cursor():\n dbh = handle()\n return dbh.cursor()",
"def cursor(self):\n if self.__connection is None:\n self.connect()\n return self.__connection.cursor()",
"def _cursor(self):\n cursor = self.conn.cursor()\n\n return cursor",
"def get_cursor(self):\n try:\n self.cursor = self.connection.cursor()\n logging.getLogger(__name__).info(\"Cursor was created.\")\n except Error as er:\n logging.getLogger(__name__).error(\"Something went wrong with cursor creating. %s\" %er)\n finally:\n return self.cursor",
"def get_cursor(self):\n self.cur = self.dbcon.cursor()\n return self.cur",
"def db_create_cursor(self, database_name):\n\n cursor = self.connections[database_name].cursor()\n return cursor",
"def __enter__(self) -> 'cursor':\n self.conn = cx_Oracle.connect(self.configuration)\n self.cursor = self.conn.cursor()\n return self.cursor",
"def cursor(self):\n cursor = Cursor(self, self.__aceQLHttpApi)\n return cursor",
"def cursor(self):\n return self._conn.cursor()",
"def __cursor(cls):\n print('|-- Richiesta cursore da:'+str(cls.__dbCon))\n return cls.__dbCon.cursor( cursor_factory = psycopg2.extras.DictCursor )",
"def create_cursor(self):\n c = self._dbapi_connection.cursor()\n c.arraysize = 100\n return c",
"def __enter__(self) -> 'DBcursor':\n self.conn = connector.connect(**self.dbconfig)\n self.cursor = self.conn.cursor()\n return self.cursor",
"def new_cursor(self, db, collection):\n if self._mongo_client == None:\n raise Exception(\"mongo_client is None\")\n cursor = self._mongo_client[db][collection]\n return cursor",
"def cursor(self) -> NamedTupleCursor:\n return self.connection.cursor",
"def _get_cursor(self):\n conn = self._connect()\n conn.autocommit = True\n cursor = conn.cursor()\n return cursor",
"def get_cursor():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = connect_db()\n return db.cursor()",
"def cdr_cursor(self):\n\n if not hasattr(self, \"_cdr_cursor\"):\n opts = dict(tier=self.tier, user=\"CdrGuest\")\n self._cdr_cursor = db.connect(**opts).cursor()\n return self._cdr_cursor",
"def get_cursor():\n return _thread_local.connection.cursor()",
"def cursor(self):\n with self.connection() as conn:\n cursor = conn.cursor(prepared=True)\n try:\n yield cursor\n finally:\n cursor.close()",
"def get_database_cursor(conn=None):\n\n if not conn:\n conn = get_database_connection()\n\n return conn.cursor()",
"def get_cursor(self):\n return self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)",
"def get_cursor(file_name):\n con = lite.connect(file_name)\n con.row_factory = lite.Row\n return con.cursor()",
"def get_cursor(self):\n return self.connection.cursor()",
"def managed_cursor(self, cursor_factory=None):\n\n self.conn_url = (f'postgresql://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}')\n self.conn = psycopg2.connect(self.conn_url)\n self.conn.autocommit = True\n self.curr = self.conn.cursor(cursor_factory=cursor_factory)\n try:\n yield self.curr\n finally:\n self.curr.close()\n self.conn.close()",
"def cursor(self):\n return self.conn.cursor()",
"def getCursor(self) -> sqlite3:\n return self.cursor",
"def cursor(self):\n with self.conn as c:\n yield c.cursor()",
"def cursor(self):\n with self.conn as c:\n yield c.cursor()"
]
| [
"0.78827614",
"0.7587127",
"0.75584066",
"0.73682237",
"0.7349129",
"0.73291755",
"0.7260006",
"0.7240816",
"0.7236439",
"0.72060776",
"0.71288323",
"0.71187013",
"0.7104072",
"0.7073766",
"0.70614487",
"0.7044208",
"0.7023463",
"0.70225805",
"0.69576836",
"0.69546443",
"0.69399583",
"0.69283503",
"0.69059426",
"0.6875434",
"0.68606293",
"0.68598765",
"0.6837516",
"0.6808449",
"0.67570513",
"0.67570513"
]
| 0.8004369 | 0 |
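The two DB-API records here (cursor creation above, executemany just below) follow the PEP 249 interface; for orientation, the same calls against Python's built-in sqlite3 module, which also implements that interface, look like this — the table and placeholder style are illustrative only:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE people (name TEXT, age INTEGER)")
cur.executemany(
    "INSERT INTO people (name, age) VALUES (?, ?)",
    [("Ada", 36), ("Grace", 45)],   # one parameter sequence per row
)
conn.commit()
cur.close()
conn.close()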
Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters. Modules are free to implement this method using multiple calls to the execute() method or by using array operations to have the database process the sequence as a whole in one call. Use of this method for an operation which produces one or more result sets constitutes undefined behavior, and the implementation is permitted (but not required) to raise an exception when it detects that a result set has been created by an invocation of the operation. The same comments as for execute() also apply accordingly to this method. Return values are not defined. | def executemany(self, operation, seq_of_parameters):
if self._closed:
raise Error('The cursor has been closed.')
if self.connection._closed:
raise Error('The connection to the database has been closed.')
for parameters in seq_of_parameters:
self.execute(operation, parameters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _executeprepared(self, operation, parameters):\n # Create a statement handle\n p_statement = self._statement_cache.get_prepared_statement(operation)\n\n if p_statement.parameter_count != len(parameters):\n raise ProgrammingError(\"Incorrect number of parameters specified, expected %d, got %d\" %\n (p_statement.parameter_count, len(parameters)))\n\n # Use handle to query\n return self.session.execute_prepared_statement(p_statement, parameters)",
"def executemany(self, operation, seq_of_parameters):\n self._check_closed()\n\n p_statement = self._statement_cache.get_prepared_statement(operation)\n self.session.execute_batch_prepared_statement(p_statement, seq_of_parameters)",
"def execute(self, operation, parameters=None):\n self._check_closed()\n self._reset()\n self.__query = operation\n\n if parameters is None:\n exec_result = self._execute(operation)\n else:\n exec_result = self._executeprepared(operation, parameters)\n\n self.rowcount = exec_result.row_count\n if exec_result.result > 0:\n self._result_set = self.session.fetch_result_set(exec_result.statement)\n self.description = self.session.fetch_result_set_description(self._result_set)\n\n # TODO: ???\n if self.rowcount < 0:\n self.rowcount = -1\n self.rownumber = 0",
"async def executemany(self, operation, seq_of_parameters):\n\n count = 0\n for parameters in seq_of_parameters:\n count += await self.execute(operation, parameters)\n self.rowcount = count\n return count",
"def execute(self, stmt, params=(), **kwargs):\n if kwargs:\n params = kwargs\n # do any substitutions\n if params:\n if isinstance(params, (tuple, list, set)):\n newpar = []\n for par in params:\n newpar.append(convert_PROCEDURES(par))\n params = newpar\n elif isinstance(params, dict):\n for k, val in params.items():\n params[k] = convert_PROCEDURES(val)\n # if the statement was given do any conversions\n if stmt:\n if stmt.startswith('commit'):\n return None\n exstmt = self.replacevals(stmt)\n if exstmt is None:\n return None\n\n return super(MockCursor, self).execute(convert_PROCEDURES(exstmt), params)\n return super(MockCursor, self).execute(self._stmt, params)",
"def execute(self, sql, parameters=None):\n if parameters is None:\n parameters = {}\n self._cursor = iter(self._hndl.execute(sql, parameters))\n return self",
"def execute(self, operation, parameters=[]):\r\n if self._closed:\r\n raise Error('The cursor has been closed.')\r\n if self.connection._closed:\r\n raise Error('The connection to the database has been closed.')\r\n # start web.database\r\n if self.debug:\r\n self.sql.append(sql)\r\n # end web.database\r\n if type(parameters) not in [type(()), type([])]:\r\n parameters = [parameters]\r\n parsedSQL = self.connection.parser.parse(operation)\r\n if parsedSQL['function'] == 'create':\r\n self.info = self.connection._create(parsedSQL['table'], parsedSQL['columns'], parameters)\r\n elif parsedSQL['function'] == 'drop':\r\n self.info = self.connection._drop(parsedSQL['tables'])\r\n elif parsedSQL['function'] == 'insert':\r\n self.info = self.connection._insert(parsedSQL['table'], parsedSQL['columns'], parsedSQL['sqlValues'], parameters)\r\n elif parsedSQL['function'] == 'update':\r\n if parsedSQL.has_key('where'):\r\n self.info = self.connection._update(parsedSQL['table'], parsedSQL['columns'], parsedSQL['where'], parsedSQL['sqlValues'], parameters)\r\n else:\r\n self.info = self.connection._update(parsedSQL['table'], parsedSQL['columns'], sqlValues = parsedSQL['sqlValues'], values = parameters)\r\n elif parsedSQL['function'] == 'select':\r\n del parsedSQL['function']\r\n return self.select(**parsedSQL)\r\n elif parsedSQL['function'] == 'delete':\r\n if parsedSQL.has_key('where'):\r\n self.info = self.connection._delete(parsedSQL['table'], parsedSQL['where'], parameters)\r\n else:\r\n self.info = self.connection._delete(parsedSQL['table'], values=parameters)\r\n elif parsedSQL['function'] == 'show':\r\n self.info = self.connection._showTables()\r\n else:\r\n raise SQLError(\"%s is not a supported keyword.\"%parsedSQL['function'].upper())\r\n self.position = 0",
"def invoke_statement(\n self,\n statement: Optional[Executable] = None,\n params: Optional[_CoreAnyExecuteParams] = None,\n execution_options: Optional[OrmExecuteOptionsParameter] = None,\n bind_arguments: Optional[_BindArguments] = None,\n ) -> Result[Any]:\n\n if statement is None:\n statement = self.statement\n\n _bind_arguments = dict(self.bind_arguments)\n if bind_arguments:\n _bind_arguments.update(bind_arguments)\n _bind_arguments[\"_sa_skip_events\"] = True\n\n _params: Optional[_CoreAnyExecuteParams]\n if params:\n if self.is_executemany:\n _params = []\n exec_many_parameters = cast(\n \"List[Dict[str, Any]]\", self.parameters\n )\n for _existing_params, _new_params in itertools.zip_longest(\n exec_many_parameters,\n cast(\"List[Dict[str, Any]]\", params),\n ):\n if _existing_params is None or _new_params is None:\n raise sa_exc.InvalidRequestError(\n f\"Can't apply executemany parameters to \"\n f\"statement; number of parameter sets passed to \"\n f\"Session.execute() ({len(exec_many_parameters)}) \"\n f\"does not match number of parameter sets given \"\n f\"to ORMExecuteState.invoke_statement() \"\n f\"({len(params)})\"\n )\n _existing_params = dict(_existing_params)\n _existing_params.update(_new_params)\n _params.append(_existing_params)\n else:\n _params = dict(cast(\"Dict[str, Any]\", self.parameters))\n _params.update(cast(\"Dict[str, Any]\", params))\n else:\n _params = self.parameters\n\n _execution_options = self.local_execution_options\n if execution_options:\n _execution_options = _execution_options.union(execution_options)\n\n return self.session._execute_internal(\n statement,\n _params,\n execution_options=_execution_options,\n bind_arguments=_bind_arguments,\n _parent_execute_state=self,\n )",
"def execute(self, sql, params=None):\n if params and not isinstance(params, Mapping):\n raise TypeError(\"Expected dict or other mapping object\")\n\n cursor = self.cursor()\n sql, params = utils.change_param_style(self.driver.paramstyle, sql, params)\n cursor.execute(sql, params)\n return cursor",
"def executemany(self, stmt, params):\n if params:\n if isinstance(params, (tuple, list, set)):\n newpar = []\n for par in params:\n newpar.append(convert_PROCEDURES(par))\n params = newpar\n elif isinstance(params, dict):\n for k, val in params.items():\n params[k] = convert_PROCEDURES(val)\n\n if stmt:\n exstmt = self.replacevals(stmt)\n if exstmt is None:\n return None\n #print exstmt\n #print params\n return super(MockCursor, self).executemany(convert_PROCEDURES(exstmt), params)\n return super(MockCursor, self).executemany(self._stmt, params)",
"def dbExecute(con, statement, args=[], skipTrace=False):\n cursor = con.cursor()\n stmt = cursor.mogrify(statement, args);\n if not skipTrace:\n trace(\"executing:\" + str(stmt))\n cursor.execute(stmt)\n global quiet\n if not skipTrace:\n trace(\"statusmessage=\" + cursor.statusmessage + \", rowcount=\" + str(cursor.rowcount))\n return cursor",
"def _execute(self, *args):\n cursor = self.db.cursor()\n cursor.execute(*args)\n return cursor",
"async def execute(self, operation, parameters=None):\n # type: (str, Optional[Dict]) -> int\n\n if not self.connection:\n self._exception_handler(ProgrammingError, \"cursor is closed\")\n\n # clear message history\n self.messages = []\n\n # set the number of rows to fetch\n if self.arraysize != self.connection.replysize:\n self.connection.set_replysize(self.arraysize)\n\n if operation == self.operation:\n # same operation, DBAPI mentioned something about reuse\n # but monetdb doesn't support this\n pass\n else:\n self.operation = operation\n\n query = \"\"\n if parameters:\n if isinstance(parameters, dict):\n query = operation % {\n k: monetize.convert(v) for (k, v) in parameters.items()\n }\n elif type(parameters) == list or type(parameters) == tuple:\n query = operation % tuple(\n [monetize.convert(item) for item in parameters]\n )\n elif isinstance(parameters, str):\n query = operation % monetize.convert(parameters)\n else:\n msg = \"Parameters should be None, dict or list, now it is %s\"\n self._exception_handler(ValueError, msg % type(parameters))\n else:\n query = operation\n try:\n block = await self.connection.execute(query)\n self._store_result(block)\n self.rownumber = 0\n self._executed = operation\n except Exception as e:\n print(\n \"\\nhost: \",\n self.connection.hostname,\n \"\\ndatabase: \",\n self.connection.database,\n \"\\nquery: \",\n operation,\n \"\\nexception: \",\n str(e),\n \"\\n\\n\",\n )\n raise\n print(\n \"\\nhost: \",\n self.connection.hostname,\n \"\\ndatabase: \",\n self.connection.database,\n \"\\nquery: \",\n operation,\n \"\\n\\n\",\n )\n return self.rowcount",
"def execute_many(self, query, parameters):\n return self._executemany_lastrowid(query, parameters)",
"def execute_query_sequence(db_cursor, all_queries):\n\n for query in all_queries:\n db_cursor.execute(query)",
"def _execute(self,\n native,\n command,\n data=None,\n returning=True,\n mapper=dict):\n if data is None:\n data = {}\n\n with native.cursor() as cursor:\n log.debug('***********************')\n log.debug(command % data)\n log.debug('***********************')\n\n try:\n rowcount = 0\n for cmd in command.split(';'):\n cmd = cmd.strip()\n if cmd:\n cursor.execute(cmd.strip(';') + ';', data)\n rowcount += cursor.rowcount\n\n # look for a disconnection error\n except pymysql.InterfaceError:\n raise orb.errors.ConnectionLost()\n\n # look for integrity errors\n except (pymysql.IntegrityError, pymysql.OperationalError) as err:\n native.rollback()\n\n # look for a duplicate error\n if err[0] == 1062:\n raise orb.errors.DuplicateEntryFound(err[1])\n\n # look for a reference error\n reference_error = re.search('Key .* is still referenced from table \".*\"', nstr(err))\n if reference_error:\n msg = 'Cannot remove this record, it is still being referenced.'\n raise orb.errors.CannotDelete(msg)\n\n # unknown error\n log.debug(traceback.print_exc())\n raise orb.errors.QueryFailed(command, data, nstr(err))\n\n # connection has closed underneath the hood\n except pymysql.Error as err:\n native.rollback()\n log.error(traceback.print_exc())\n raise orb.errors.QueryFailed(command, data, nstr(err))\n\n try:\n raw = cursor.fetchall()\n results = [mapper(record) for record in raw]\n except pymysql.ProgrammingError:\n results = []\n\n return results, rowcount",
"def _execute(self, db):\n raise NotImplementedError",
"def execute(self, operation, args=None, stream=None):\n\t\toperations = (op.strip() for op in operation.split(';') if len(op.strip()) > 0)\n\t\tfor op in operations:\n\t\t\tself._cursor.execute(op, args, stream)",
"def _execute(self, operation):\n # Use handle to query\n return self.session.execute_statement(self._statement_cache.get_statement(), operation)",
"def db_execute(self, database_name, statement, params):\n with self.db_create_cursor(database_name) as cursor:\n if self.debug:\n self.logger.debug(\"Running statement: \" + statement)\n return cursor.execute(statement, params)",
"def execute(self):\n if self.sql is None:\n self.sql = self.construct_query()\n # Only SQL strings can be split, not (e.g.) SQLAlchemy statements.\n if self.multiple_statements and isinstance(self.sql, str):\n statements = self._split_sql()\n else:\n statements = [self.sql]\n single_statement = True if len(statements) == 1 and self.filename else False\n try:\n for statement in statements:\n result_proxy = self.cm.conn.execute(statement)\n log_string = self.filename if single_statement else str(statement)[:25]\n self.logger.info(\"Executed {} against {}\".format(log_string, self.cm.db))\n if result_proxy.cursor:\n return self.fetch_results(result_proxy)\n except Exception as e:\n self.logger.exception(e)\n raise",
"def execute(self, query, pars=False):\n raise NotImplementedError",
"def query(self, sql_query, vars=None, processed=False, _test=False):\n if vars is None: vars = {}\n \n if not processed and not isinstance(sql_query, SQLQuery):\n sql_query = reparam(sql_query, vars)\n \n if _test: return sql_query\n \n db_cursor = self._db_cursor()\n self._db_execute(db_cursor, sql_query)\n \n if db_cursor.description:\n names = [x[0] for x in db_cursor.description]\n def iterwrapper():\n row = db_cursor.fetchone()\n while row:\n yield storage(dict(zip(names, row)))\n row = db_cursor.fetchone()\n out = iterbetter(iterwrapper())\n out.__len__ = lambda: int(db_cursor.rowcount)\n out.list = lambda: [storage(dict(zip(names, x))) \\\n for x in db_cursor.fetchall()]\n else:\n out = db_cursor.rowcount\n \n if not self.ctx.transactions: \n self.ctx.commit()\n return out",
"def execute_many(self, sql, args=None):\r\n args = args or None\r\n with Executer(self) as cursor:\r\n rows = cursor.executemany(sql, args)\r\n return rows",
"def execute_query(self, *args, **kwargs):",
"def execute(\n self,\n query: Query,\n params: Optional[Params] = None,\n *,\n prepare: Optional[bool] = None,\n binary: bool = False,\n ) -> Cursor[Row]:\n cur = self.cursor()\n if binary:\n cur.format = Format.BINARY\n\n try:\n return cur.execute(query, params, prepare=prepare)\n except e.Error as ex:\n raise ex.with_traceback(None)",
"def execute_param(cursor, query, param):\n while True:\n try:\n cursor.execute(query, param)\n break\n except Exception as e:\n print(\"Database query: {} {} {}\".format(cursor, query, param))\n print(\"Database retry reason: {}\".format(e))\n time.sleep(random.random())\n return cursor",
"def _execute_query(self, query, values):\n with self as plasticDB:\n cursor = plasticDB.connection.cursor()\n cursor.execute(query,values)\n if not cursor.description:\n return []\n rs = RecordSet(initialData=cursor.fetchall(), recordType=next(zip(*cursor.description)))\n return rs",
"def __execsql(self, sql, seq):\n return self.sqldb.executemany(sql, [x._asdict() for x in seq])",
"def execute(self, query_string, params=None, many_mode=False):\n if not self.connection:\n self.close()\n self._free_stmt(SQL_CLOSE)\n if params:\n # If parameters exist, first prepare the query then executed with parameters\n if not isinstance(params, (tuple, list)):\n raise TypeError(\"Params must be in a list, tuple\")\n if query_string != self.statement:\n # if the query is not same as last query, then it is not prepared\n self.prepare(query_string)\n param_types = [get_type(_) for _ in params]\n if self._last_param_types is None or len(param_types) != len(self._last_param_types) or any(p_type[0] != 'N' and p_type != self._last_param_types[i] for i, p_type in enumerate(param_types)):\n self._free_stmt(SQL_RESET_PARAMS)\n self._bind_params(param_types)\n # With query prepared, now put parameters into buffers\n col_num = 0\n for param_buffer, param_buffer_len, sql_type in self._param_buffer_list:\n c_char_buf, c_buf_len = '', 0\n param_val = params[col_num]\n param_types_0 = param_types[col_num][0]\n if param_types_0 in {'N', 'BN'}:\n param_buffer_len.value = SQL_NULL_DATA\n col_num += 1\n continue\n elif param_types_0 in {'i', 'l', 'f'}:\n c_char_buf = bytes(str(param_val), 'ascii')\n c_buf_len = len(c_char_buf)\n elif param_types_0 in {'s', 'S'}:\n c_char_buf = param_val\n c_buf_len = len(c_char_buf)\n elif param_types_0 in {'u', 'U'}:\n c_char_buf = ucs_buf(param_val)\n c_buf_len = len(c_char_buf)\n elif param_types_0 == 'dt':\n c_char_buf = bytes(param_val.strftime('%Y-%m-%d %H:%M:%S.%f')[:self.connection.type_size_dic[SQL_TYPE_TIMESTAMP][0]], 'ascii')\n c_buf_len = len(c_char_buf)\n # print c_buf_len, c_char_buf\n elif param_types_0 == 'd':\n c_char_buf = bytes(param_val.isoformat()[:self.connection.type_size_dic[SQL_TYPE_DATE][0] if SQL_TYPE_DATE in self.connection.type_size_dic else 10], 'ascii')\n c_buf_len = len(c_char_buf)\n # print c_char_buf\n elif param_types_0 == 't':\n if SQL_TYPE_TIME in self.connection.type_size_dic:\n c_char_buf = param_val.isoformat()[:self.connection.type_size_dic[SQL_TYPE_TIME][0]]\n c_buf_len = len(c_char_buf)\n elif SQL_SS_TIME2 in self.connection.type_size_dic:\n c_char_buf = param_val.isoformat()[:self.connection.type_size_dic[SQL_SS_TIME2][0]]\n c_buf_len = len(c_char_buf)\n else:\n c_buf_len = self.connection.type_size_dic[SQL_TYPE_TIMESTAMP][0]\n time_str = param_val.isoformat()\n if len(time_str) == 8:\n time_str += '.000'\n c_char_buf = f'1900-01-01 {time_str[:c_buf_len - 11]}'\n c_char_buf = bytes(c_char_buf, 'ascii')\n # print c_buf_len, c_char_buf\n elif param_types_0 == 'b':\n c_char_buf = bytes('1' if param_val else '0', 'ascii')\n c_buf_len = 1\n elif param_types_0 == 'D': # Decimal\n digit_string = ''.join(str(x) for x in param_val.as_tuple()[1])\n digit_num, dec_num = param_types[col_num][1]\n if dec_num > 0:\n # has decimal\n # 1.12 digit_num = 3 dec_num = 2\n # 0.11 digit_num = 2 dec_num = 2\n # 0.01 digit_num = 1 dec_num = 2\n v = f'{param_val.as_tuple()[0] == 0 and \"+\" or \"-\"}{digit_string[:digit_num - dec_num]}.{digit_string[0 - dec_num:].zfill(dec_num)}'\n else:\n # no decimal\n v = f'{digit_string}{\"0\" * (0 - dec_num)}'\n c_char_buf = bytes(v, 'ascii')\n c_buf_len = len(c_char_buf)\n elif param_types_0 == 'bi':\n c_char_buf = bytes(param_val)\n c_buf_len = len(c_char_buf)\n else:\n c_char_buf = param_val\n if param_types_0 == 'bi':\n param_buffer.raw = bytes(param_val)\n else:\n # print (type(param_val), param_buffer, param_buffer.value)\n param_buffer.value = c_char_buf\n if param_types_0 in {'U', 'u', 'S', 's'}:\n # 
ODBC driver will find NUL in unicode and string to determine their length\n param_buffer_len.value = -3\n else:\n param_buffer_len.value = c_buf_len\n col_num += 1\n ret = SQL_EXECUTE(self.stmt_h)\n if ret != SQL_SUCCESS:\n # print param_valparam_buffer, param_buffer.value\n check_success(self, ret)\n if not many_mode:\n self._update_desc()\n else:\n self._free_stmt()\n self._last_param_types = None\n self.statement = None\n if isinstance(query_string, str):\n ret = ODBC_API.SQLExecDirectW(self.stmt_h, WCHAR_POINTER(ucs_buf(query_string)), len(query_string))\n else:\n ret = ODBC_API.SQLExecDirect(self.stmt_h, ctypes.c_char_p(query_string), len(query_string))\n check_success(self, ret)\n self._update_desc()\n return self"
]
| [
"0.73606586",
"0.73441",
"0.66620415",
"0.6475693",
"0.63031626",
"0.62294924",
"0.6206333",
"0.6151082",
"0.607004",
"0.6042311",
"0.60204035",
"0.6016212",
"0.6000834",
"0.5983184",
"0.5975359",
"0.5939904",
"0.5913451",
"0.5891441",
"0.5879333",
"0.58531886",
"0.58390516",
"0.5784168",
"0.5758717",
"0.5736462",
"0.5720419",
"0.56992364",
"0.56914127",
"0.56748796",
"0.56558394",
"0.5649425"
]
| 0.7611646 | 0 |
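The negatives in the row above all center on DB-API execute()/executemany() implementations. For readers unfamiliar with that interface, here is a minimal, runnable sketch of parameterized execution using the standard-library sqlite3 driver; the table and values are invented purely for illustration:

import sqlite3

# Minimal DB-API usage: qmark-style parameters with execute() and executemany().
conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE t (x INTEGER, y TEXT)")
cur.execute("INSERT INTO t VALUES (?, ?)", (1, "one"))
cur.executemany("INSERT INTO t VALUES (?, ?)", [(2, "two"), (3, "three")])
conn.commit()
print(cur.execute("SELECT COUNT(*) FROM t").fetchone())  # -> (3,)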
This can be used before a call to executeXXX() to predefine memory areas for the operation's parameters. sizes is specified as a sequence, one item for each input parameter. The item should be a Type Object that corresponds to the input that will be used, or it should be an integer specifying the maximum length of a string parameter. If the item is None, then no predefined memory area will be reserved for that column (this is useful to avoid predefined areas for large inputs). This method would be used before the executeXXX() method is invoked. Implementations are free to have this method do nothing and users are free to not use it. | def setinputsizes(self, sizes):
if self._closed:
raise Error('The cursor has been closed.')
if self.connection._closed:
raise Error('The connection to the database has been closed.')
else:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setinputsizes(self, sizes):\n pass",
"def __init__(__self__, *,\n size: pulumi.Input[int]):\n pulumi.set(__self__, \"size\", size)",
"def _assign_sizes(self):",
"def set_tunable_parameters(self, input_type='default', reduced_size=64, **kwargs):\n self.logger.info(\"Set parameters: input_type=%s, reduced_size=%s\", input_type, reduced_size)\n self.input_type = input_type\n self.reduced_size = reduced_size",
"def size(self, size_input: Tuple[str, str]):\n self.isize = [UIMetric.parse(size_input[0]),\n UIMetric.parse(size_input[1])]",
"def __process_x_size_arg(self, argument):\n _method_name = '__process_x_size_arg'\n\n match = self.__x_args_size_regex.match(argument)\n xarg = match.group(1)\n xvalue = match.group(3)\n if 'size' not in self.__x_args:\n self.__x_args['size'] = OrderedDict()\n self._logger.finer('WLSDPLY-08301', argument, xarg, xvalue,\n class_name=self._class_name, method_name=_method_name)\n self.__x_args['size'][xarg] = xvalue",
"def new_param(self, size:int=1, as_array=False) -> sp.Symbol:\n if size < 1:\n raise ValueError('Number of new parameters created must be greater than zero')\n \n if self.parameter_index is None:\n start = self.num_param\n else:\n start = self.parameter_index\n self._parameter_index += size\n \n params = np.array([sp.Symbol('%s_%s' % (self.parameter_symbol, i)) \\\n for i in range(start, start+size)])\n all_params = np.append(self._parameters, params)\n self._parameters = np.array(get_unique_symbols(all_params))\n \n if (len(params) == 1) and (not as_array):\n return params[0]*self.parameter_scale\n\n return params*self.parameter_scale",
"def _bind_params(self, param_types, pram_io_list=[]):\n # Clear the old Parameters\n if not self.connection:\n self.close()\n # self._free_results(NO_FREE_STATEMENT)\n # Get the number of query parameters judged by database.\n num_params = C_SHORT()\n ret = ODBC_API.SQLNumParams(self.stmt_h, ADDR(num_params))\n if ret != SQL_SUCCESS:\n check_success(self, ret)\n if len(param_types) != num_params.value:\n # In case number of parameters provided do not same as number required\n raise ProgrammingError('HY000', f'The SQL contains {num_params.value} parameter markers, but {len(param_types)} parameters were supplied')\n # Every parameter needs to be binded to a buffer\n param_buffer_list = []\n # Temporary holder since we can only call SQLDescribeParam before calling SQLBindParam.\n temp_holder = []\n for col_num in range(num_params.value):\n dec_num = 0\n sql_c_type = SQL_C_CHAR\n param_types_0 = param_types[col_num][0]\n if param_types_0 == 'u':\n sql_c_type = SQL_C_WCHAR\n sql_type = SQL_WVARCHAR\n # allocate two bytes for each char due to utf-16-le encoding\n buf_size = 255 * 2\n parameter_buffer = CREATE_BUFFER_U(buf_size)\n elif param_types_0 == 's':\n sql_type = SQL_VARCHAR\n buf_size = 255\n elif param_types_0 == 'U':\n sql_c_type = SQL_C_WCHAR\n sql_type = SQL_WLONGVARCHAR\n buf_size = param_types[col_num][1] # len(self._inputsizers)>col_num and self._inputsizers[col_num] or 20500\n parameter_buffer = CREATE_BUFFER_U(buf_size)\n elif param_types_0 == 'S':\n sql_type = SQL_LONGVARCHAR\n buf_size = param_types[col_num][1] # len(self._inputsizers)>col_num and self._inputsizers[col_num] or 20500\n # bool subclasses int, thus has to go first\n elif param_types_0 == 'b':\n sql_type = SQL_BIT\n buf_size = SQL_DATA_TYPE_DICT[sql_type][4]\n elif param_types_0 == 'i':\n sql_type = SQL_INTEGER\n buf_size = SQL_DATA_TYPE_DICT[sql_type][4]\n elif param_types_0 == 'l':\n sql_type = SQL_BIGINT\n buf_size = SQL_DATA_TYPE_DICT[sql_type][4]\n elif param_types_0 == 'D': # Decimal\n sql_type = SQL_NUMERIC\n digit_num, dec_num = param_types[col_num][1]\n if dec_num > 0:\n # has decimal\n # 1.23 as_tuple -> (1,2,3),-2\n # 1.23 digit_num = 3 dec_num = 2\n # 0.11 digit_num = 2 dec_num = 2\n # 0.01 digit_num = 1 dec_num = 2\n buf_size = dec_num if dec_num > digit_num else digit_num\n else:\n # no decimal\n buf_size = digit_num - dec_num\n dec_num = 0\n parameter_buffer = CREATE_BUFFER(buf_size + 4) # add extra length for sign and dot\n elif param_types_0 == 'f':\n sql_type = SQL_DOUBLE\n buf_size = SQL_DATA_TYPE_DICT[sql_type][4]\n # datetime subclasses date, thus has to go first\n elif param_types_0 == 'dt':\n sql_type = SQL_TYPE_TIMESTAMP\n buf_size = self.connection.type_size_dic[SQL_TYPE_TIMESTAMP][0]\n dec_num = self.connection.type_size_dic[SQL_TYPE_TIMESTAMP][1]\n elif param_types_0 == 'd':\n if SQL_TYPE_DATE in self.connection.type_size_dic:\n # if DEBUG:print('conx.type_size_dic.has_key(SQL_TYPE_DATE)')\n sql_type = SQL_TYPE_DATE\n buf_size = self.connection.type_size_dic[SQL_TYPE_DATE][0]\n dec_num = self.connection.type_size_dic[SQL_TYPE_DATE][1]\n else:\n # SQL Sever <2008 doesn't have a DATE type.\n sql_type = SQL_TYPE_TIMESTAMP\n buf_size = 10\n elif param_types_0 == 't':\n if SQL_TYPE_TIME in self.connection.type_size_dic:\n sql_type = SQL_TYPE_TIME\n buf_size = self.connection.type_size_dic[SQL_TYPE_TIME][0]\n dec_num = self.connection.type_size_dic[SQL_TYPE_TIME][1]\n elif SQL_SS_TIME2 in self.connection.type_size_dic:\n # TIME type added in SQL Server 2008\n sql_type = SQL_SS_TIME2\n buf_size 
= self.connection.type_size_dic[SQL_SS_TIME2][0]\n dec_num = self.connection.type_size_dic[SQL_SS_TIME2][1]\n else:\n # SQL Sever <2008 doesn't have a TIME type.\n sql_type = SQL_TYPE_TIMESTAMP\n buf_size = self.connection.type_size_dic[SQL_TYPE_TIMESTAMP][0]\n dec_num = 3\n elif param_types_0 == 'BN':\n sql_c_type = SQL_C_BINARY\n sql_type = SQL_VARBINARY\n buf_size = 1\n elif param_types_0 == 'N':\n if len(self._param_sql_type_list) > 0:\n sql_c_type = 99\n sql_type = self._param_sql_type_list[col_num][0]\n buf_size = 1\n else:\n sql_type = SQL_CHAR\n buf_size = 1\n elif param_types_0 == 'bi':\n sql_c_type = SQL_C_BINARY\n sql_type = SQL_LONGVARBINARY\n buf_size = param_types[col_num][1] # len(self._inputsizers)>col_num and self._inputsizers[col_num] or 20500\n else:\n sql_type = SQL_LONGVARCHAR\n buf_size = len(self._inputsizers) > col_num and self._inputsizers[col_num] or 20500\n parameter_buffer = locals().get('parameter_buffer', CREATE_BUFFER(buf_size))\n temp_holder.append((sql_c_type, sql_type, buf_size, dec_num, parameter_buffer))\n del parameter_buffer\n for col_num, (sql_c_type, sql_type, buf_size, dec_num, parameter_buffer) in enumerate(temp_holder):\n len_or_ind_buf = C_SSIZE_T()\n input_output_type = 1\n if len(pram_io_list) > col_num:\n input_output_type = pram_io_list[col_num]\n ret = ODBC_API.SQLBindParameter(self.stmt_h, col_num + 1, input_output_type, sql_c_type, sql_type, buf_size, dec_num, ADDR(parameter_buffer), C_SSIZE_T(buf_size), ADDR(len_or_ind_buf))\n if ret != SQL_SUCCESS:\n check_success(self, ret)\n # Append the value buffer and the length buffer to the array\n param_buffer_list.append((parameter_buffer, len_or_ind_buf, sql_type))\n self._last_param_types = param_types\n self._param_buffer_list = param_buffer_list",
"def ior_param_update(self, oclass, sizes):\n self.ior_cmd.block_size.update(sizes[1])\n self.ior_cmd.transfer_size.update(sizes[2])\n self.ior_cmd.dfs_oclass.update(oclass[0])\n self.ior_cmd.dfs_dir_oclass.update(oclass[0])\n self.ior_cmd.dfs_chunk.update(sizes[0])",
"def size(self, size={}):\n # type: (dict) -> Entity\n if not size:\n return\n for s in ['width', 'height']:\n if s in size:\n self.type_def[s] = int(size[s])\n\n return self",
"def auto_populate_parameters(self):\n run_arguments = get_func_arguments(self.run)\n\n if not run_arguments:\n return\n\n # ignore 'self' argument, should be safe-ish\n if \"self\" in list(run_arguments.keys()):\n run_arguments.pop(\"self\")\n\n for param_name, default_value in run_arguments.items():\n is_required = default_value == RequiresValueType\n if is_required:\n run_arguments[param_name] = str() # fill to make sure every argument has something\n\n if run_arguments:\n self.param_grid.from_data(run_arguments)\n self._parameters_auto_generated = True",
"def setoutputsize(self, size, column=None):\n pass",
"def set_sizes(self, sizes):\n self._sizes = sizes",
"def apply(self, name, size, type):\n self.properties['name'] = name\n self.properties['size'] = size\n self.properties['type'] = type",
"def input_size(self):\n raise NotImplementedError()",
"def precheck(self):\n if self.__memory_size is None:\n self.logger.exception(\"[Memory] Please set memory size.\")\n raise ArgsNotCorrect(\"Please set memory size.\")",
"def setoutputsize(self, size, column=None):\r\n if self._closed:\r\n raise Error('The cursor has been closed.')\r\n if self.connection._closed:\r\n raise Error('The connection to the database has been closed.')\r\n else:\r\n pass",
"def set_inputs(self, item_data):\n self.item_type = item_data[0]\n self.size = item_data[1]\n self.toppings = item_data[2]",
"def set_size(self, w, h):\n\t\tpass",
"def getSize(self):\n size = [None,None]\n try:\n for i in self.itemType.find('parameters'):\n paramType = i.find('type').text.strip()\n if paramType.startswith('size-w'):\n size[0] = round(float(self.params[i.find('name').text]))\n if paramType.startswith('size-h'):\n size[1] = round(float(self.params[i.find('name').text]))\n except:\n pos = [16,16]\n return size",
"def set_parameters(self, population_size=540, extern_arc_rate=2.6, pbest_factor=0.11, hist_mem_size=6, **kwargs):\n super().set_parameters(population_size=population_size,\n individual_type=kwargs.pop('individual_type', SolutionSHADE), **kwargs)\n self.extern_arc_rate = extern_arc_rate\n self.pbest_factor = pbest_factor\n self.hist_mem_size = hist_mem_size",
"def product_sizes(self, product_sizes):\n\n self._product_sizes = product_sizes",
"def _prep_values(\n self,\n size: Union[\n int, float, List[int], Tuple[int, int], List[float], Tuple[float, float]\n ],\n ) -> Union[List[int], List[float], Tuple[int, int], Tuple[float, float]]:\n size = [size] * 2 if not isinstance(size, (list, tuple)) else size\n assert len(size) == 2\n return size",
"def __init__(self, size):\n self.size = size",
"def __init__(self, size):\n self.size = size",
"def add_params(size, name=\"\"):\n if len(size) == 1:\n print(\"vector \" + name + \": \" + str(size[0]) + \"; uniform in [-0.1, 0.1]\")\n else:\n print(\"matrix \" + name + \": \" + str(size[0]) + \" x \" + str(size[1]) + \"; uniform in [-0.1, 0.1]\")\n\n size_int = tuple([int(ss) for ss in size])\n return torch.nn.Parameter(torch.empty(size_int).uniform_(-0.1, 0.1))",
"def size(self, *args):\n pass",
"def _setVals(self, cmd_length=0):\n self.cmd_length = cmd_length",
"def test_ban_size_kwarg(self):\n with pytest.raises(ValueError):\n Dimension(\"yolo\", \"norm\", 0.9, size=(3, 2))",
"def __init__(self, size: int = 100):\n self.data = [None] * size\n self.size = size"
]
| [
"0.6015532",
"0.5730435",
"0.56628126",
"0.547357",
"0.54327464",
"0.5289766",
"0.5288654",
"0.5285771",
"0.5240631",
"0.5234483",
"0.51926106",
"0.5188808",
"0.5132499",
"0.50914514",
"0.50674397",
"0.49855158",
"0.49674615",
"0.49446407",
"0.49392125",
"0.4936378",
"0.49280328",
"0.49124953",
"0.48999023",
"0.48705366",
"0.48705366",
"0.4867899",
"0.48613346",
"0.4851132",
"0.4851067",
"0.48428673"
]
| 0.6343714 | 0 |
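As a usage illustration for the method documented in this row, the following sketch calls setinputsizes() immediately before execute(). It uses the standard-library sqlite3 driver, which (as the DB-API permits) accepts the call and does nothing with it; the table and sizes are invented for the example:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE notes (id INTEGER, title TEXT, body TEXT)")

# One entry per input parameter: a type object, a maximum length, or None
# (None asks the driver not to pre-allocate a buffer for that parameter).
cur.setinputsizes([None, 255, None])
cur.execute("INSERT INTO notes VALUES (?, ?, ?)", (1, "hello", "a potentially large body"))
conn.commit()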
Set a column buffer size for fetches of large columns (e.g. LONGs, BLOBs, etc.). The column is specified as an index into the result sequence. Not specifying the column will set the default size for all large columns in the cursor. This method would be used before the executeXXX() method is invoked. Implementations are free to have this method do nothing and users are free to not use it. | def setoutputsize(self, size, column=None):
if self._closed:
raise Error('The cursor has been closed.')
if self.connection._closed:
raise Error('The connection to the database has been closed.')
else:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setoutputsize(self, size, column=None):\n pass",
"def setColumnWidth(self, column, newWidth = None):\n\t\t\t\tdef yieldWidths():\n\t\t\t\t\tfor i, row in enumerate(self.thing.iter_rows(), start = 1):\n\t\t\t\t\t\twidth = self.getCellWidth(i, column)\n\t\t\t\t\t\tif (width is not None):\n\t\t\t\t\t\t\tyield width\n\n\t\t\t\tif (newWidth is None):\n\t\t\t\t\t#Find the longest cell in the column\n\t\t\t\t\tpossibleWidths = tuple(yieldWidths())\n\t\t\t\t\tif (possibleWidths):\n\t\t\t\t\t\tnewWidth = max(possibleWidths)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewWidth = -1 #Compensate for blank columns\n\n\t\t\t\t#Apply the new width\n\t\t\t\tnewWidth += 2\n\t\t\t\tself.thing.column_dimensions[openpyxl.utils.get_column_letter(column)].width = newWidth",
"def setBufferSize(self, buffer_size):\n DPxSetDinBuffSize(buffer_size)",
"def set_column(self, column_name, column):\n _key_guard(column_name, 'Column name')\n self._set_column(column_name, column)",
"def SetColumn(self, column, info):\r\n \r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n w = self._columns[column].GetWidth()\r\n self._columns[column] = info\r\n \r\n if w != info.GetWidth():\r\n self._total_col_width += info.GetWidth() - w\r\n self._owner.AdjustMyScrollbars()\r\n \r\n self._owner._dirty = True",
"def setColumn(self, column_number, column):\n self.data[column_number] = column\n return",
"def set_file_size(self, file_path, row):\n row[_column_name] = os.path.getsize(file_path)",
"def set_column(self,column):\n\t\tif ( not self.validate(1,column)):\n\t\t\treturn\n\t\tself.Grid[column] = 0xff",
"def column_number(self, column_number):\n\n self._column_number = column_number",
"def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.LargeBinary(length=kwargs.get(\"max_length\"))",
"def setCol(self, col):\n self.column = col",
"def column(self, column):\n\n self._column = column",
"def setcolumn(self, column, value, accept_small_names=True):\n if column in self.keys():\n self[column] = value\n return\n elif accept_small_names:\n if self[\"__psvcolumnstracker__\"].get(column):\n self.__setattr__(column, value)\n return\n if not accept_small_names:\n raise ValueError(\"'{}'\".format(column))\n else:\n raise ValueError(\"'{}'. Make sure the shorterned columns name have no collisions\".format(column))",
"def SetNumberOfColumns(self, ncols):\n if isinstance(ncols, float):\n ncols = int(ncols)\n if self.__ncols != ncols:\n self.__ncols = ncols\n self.Modified()",
"def set_column_autowidth(worksheet: Worksheet, column: int):\n maxwidth = get_column_width(worksheet=worksheet, column=column)\n if maxwidth is None:\n return\n worksheet.set_column(first_col=column, last_col=column, width=maxwidth)",
"def _set_column(self, column_name, column):\n self._dirty = True\n\n if column.ndim != 1:\n raise ValueError(\"Can only add one-dimensional columns.\")\n if column.dtype.hasobject:\n # Attempting to create a specific non-object based numpy type.\n try:\n first = column[0]\n except IndexError:\n column = np.array([])\n else:\n try:\n # Determining type from the first element.\n if isinstance(first, datetime.datetime):\n # Datetime.\n column = np.array(column, dtype='datetime64[us]')\n elif isinstance(first, datetime.timedelta):\n # Timedelta.\n try:\n column = np.array(column, dtype='timedelta64[us]')\n except TypeError:\n # This exception can occur in numpy 1.9.1 on 32-bit\n # Windows if there is a mix of zero-value and\n # non-zero-value timedeltas. Work around by not\n # converting the zero-value timedeltas to numpy,\n # but using it as the default value instead.\n temp_column = np.zeros_like(\n column, dtype='timedelta64[us]')\n for i, v in enumerate(column):\n if v != datetime.timedelta(0):\n temp_column[i] = v\n column = temp_column\n else:\n # No conversion possible.\n raise ValueError()\n except (ValueError, TypeError):\n raise Exception(\n u'Unsupported object type in column {}'.format(\n column_name))\n\n column = Column(np.array(column))\n self._set_column_column(column_name, column)\n self._number_of_columns = len(column)",
"def set_max_message_size(self, size: int = 1_073_741_824) -> None:\n self.set_db_conf(\"proto-max-bulk-len\", str(size))",
"def SetColumnWidth(self, column, width):\r\n \r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n\r\n self._total_col_width -= self._columns[column].GetWidth()\r\n self._columns[column].SetWidth(width)\r\n self._total_col_width += width\r\n self._owner.AdjustMyScrollbars()\r\n self._owner._dirty = True",
"def _get_column_chunksize(chunksize, name):\n if hasattr(chunksize, 'keys'):\n return chunksize.get(name, DEFAULT_CHUNKSIZE)\n else:\n # catch a bug\n if hasattr(chunksize, 'append') or hasattr(chunksize, 'discard'):\n raise ValueError(\n 'expected dict, str, or number, got {type(chunksize)}'\n )\n return chunksize",
"def megabyte(self, megabyte):\n\n self._megabyte = megabyte",
"def set_column(self, col_identifier, col_contents):\n try:\n if type(col_identifier) == int:\n self.data[col_identifier] = col_contents\n elif isinstance(col_identifier, basestring):\n # set column by title.\n num = self.get_colnumber(col_identifier)\n if num is not None and type(col_contents) == list:\n self.set_column(num, (col_identifier, col_contents))\n else:\n raise IndexError\n\n except IndexError:\n # The column isn't there already; append a new one\n if type(col_identifier) == int:\n self.data.append(col_contents)\n elif isinstance(col_identifier, basestring):\n self.data.append((col_identifier, col_contents))",
"def _SetWidth(self, column_index, content_length):\n # Updates the width at position column_index to be the max of the existing\n # value and the new content's length, or this instance's max_column_width if\n # the value would be greater than max_column_width.\n if column_index == len(self._widths):\n self._widths.append(0)\n\n new_width = max(self._widths[column_index], content_length)\n if self._max_column_width is not None:\n new_width = min(self._max_column_width, new_width)\n self._widths[column_index] = new_width",
"def org_apache_felix_http_jetty_header_buffer_size(self, org_apache_felix_http_jetty_header_buffer_size: ConfigNodePropertyInteger):\n\n self._org_apache_felix_http_jetty_header_buffer_size = org_apache_felix_http_jetty_header_buffer_size",
"def set_point_size(self, point_size=0.0):\r\n for b in self.buf:\r\n b.unib[8] = point_size",
"def use_buffer(self, buffer_size):\n self.__buffer_size = buffer_size\n if self.__buffer is None:\n self.__buffer = []",
"def position_column(self, position_column):\n\n self._position_column = position_column",
"def do_buffer_size(num: int):\n if num == '':\n print(len(cmd_parser.daq.data))\n else:\n try:\n # TODO support rest of args to buffer resize\n cmd_parser.daq.buffer_resize(int(num))\n except ValueError:\n print('invalid input, [num] must be of type <int>')",
"def set_maxItemSize(self, maxItemSize):\n if self.__log:\n self.__logger.info(f\"Setting max item size to {maxItemSize}\")\n self.__maxItemSize = maxItemSize\n self.__handle_cache_size()",
"def org_apache_felix_http_jetty_request_buffer_size(self, org_apache_felix_http_jetty_request_buffer_size: ConfigNodePropertyInteger):\n\n self._org_apache_felix_http_jetty_request_buffer_size = org_apache_felix_http_jetty_request_buffer_size",
"def OnColumnResize(self,event):\r\n iColumn = event.GetColumn()\r\n column = self.data.getParam('columns')[iColumn]\r\n self.data.updateParam('colWidths')[column] = self.gList.GetColumnWidth(iColumn)"
]
| [
"0.5896796",
"0.5542264",
"0.53449637",
"0.53362805",
"0.53230387",
"0.5317424",
"0.5267313",
"0.5227576",
"0.5193941",
"0.51717675",
"0.5167473",
"0.51413244",
"0.5122026",
"0.510181",
"0.50983393",
"0.50757354",
"0.5060915",
"0.50503165",
"0.50124973",
"0.499577",
"0.49915087",
"0.49799427",
"0.4954342",
"0.49242735",
"0.49180844",
"0.49176627",
"0.49156404",
"0.491136",
"0.48984957",
"0.48420227"
]
| 0.6449317 | 0 |
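Correspondingly, a sketch of setoutputsize() invoked before executing a query that fetches a large column. The standard-library sqlite3 driver is used again, which treats the call as a no-op, and the schema is invented for the example:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE attachments (id INTEGER, payload BLOB)")
cur.execute("INSERT INTO attachments VALUES (?, ?)", (42, b"\x00" * 1024))

# Ask for a large buffer for the column at result index 1 (the BLOB);
# omitting the column index would set the default for all large columns.
cur.setoutputsize(10 * 1024 * 1024, 1)
cur.execute("SELECT id, payload FROM attachments WHERE id = ?", (42,))
row = cur.fetchone()
print(len(row[1]))  # -> 1024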
Close the cursor now (rather than whenever __del__ is called). The cursor will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the cursor. | def close(self):
if self.connection._closed:
raise Error('The connection to the database has been closed.')
if self._closed:
raise Error('The cursor has already been closed.')
else:
self._closed = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def close(self):\n self.cursor.close()",
"def close(self):\r\n if self.cursor is None:\r\n return\r\n\r\n self.cursor.close()\r\n self.cursor = None\r\n self.app_id = None\r\n self.iden_id = None",
"def _close_cursor(self, cursor):\n\t\twith warnings.catch_warnings():\n\t\t\twarnings.simplefilter(\"ignore\")\n\t\t\tconnection = cursor.connection\n\t\tcursor.close()\n\t\tconnection.close()",
"def close(self):\n self.closed = True\n for cursor in self.cursors:\n try:\n cursor.close()\n except exceptions.Error:\n pass # already closed",
"def close_cursor(self, cursor=None):\n if cursor != None:\n cursor.close()\n else:\n self.cur.close()\n\n self.dbcon.commit()",
"def _destroy(self):\n self.cursor.close()",
"def _close_hive_cursor(cursor):\n _print_info('Closing hive cursor.')\n cursor.close()",
"def __close(self):\n\n self.__cursor.close()\n self.__connection.close()",
"def close(self):\n if self.cursor:\n self.cursor.close()\n if self.conn:\n self.conn.close()",
"def __del__(self):\n if self.cursor is not None:\n self.cursor.close()\n if self.conn is not None:\n self.conn.close()",
"def __del__(self):\n self.cursor.close()\n del self.cursor\n self.conn.close()",
"def close(cursor, conn):\n cursor.close()\n conn.close()",
"def close(connection, cursor):\n cursor.close()\n connection.close()",
"def close(self):\n self.cursor.close()\n self.db.close()",
"def disconnect(self):\n\n try:\n self.cursor.close()\n self.db.close()\n except cx_Oracle.DatabaseError:\n pass",
"def close(self):\n # ret = ODBC_API.SQLCloseCursor(self.stmt_h)\n # check_success(self, ret)\n if self.connection.connected:\n for _ in (SQL_CLOSE, SQL_UNBIND, SQL_RESET_PARAMS):\n check_success(self, ODBC_API.SQLFreeStmt(self.stmt_h, _))\n check_success(self, ODBC_API.SQLFreeHandle(SQL_HANDLE_STMT, self.stmt_h))\n self.closed = True",
"def close(self):\n# self.cursor.close()\n\tself.db.close()",
"def __del__(self):\n\n self.dbCursor.close()\n self.dbConnection.close()",
"def clear_cursor(\n self,\n *,\n cursor: str,\n error_trace: t.Optional[bool] = None,\n filter_path: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n human: t.Optional[bool] = None,\n pretty: t.Optional[bool] = None,\n ) -> ObjectApiResponse[t.Any]:\n if cursor is None:\n raise ValueError(\"Empty value passed for parameter 'cursor'\")\n __path = \"/_sql/close\"\n __body: t.Dict[str, t.Any] = {}\n __query: t.Dict[str, t.Any] = {}\n if cursor is not None:\n __body[\"cursor\"] = cursor\n if error_trace is not None:\n __query[\"error_trace\"] = error_trace\n if filter_path is not None:\n __query[\"filter_path\"] = filter_path\n if human is not None:\n __query[\"human\"] = human\n if pretty is not None:\n __query[\"pretty\"] = pretty\n __headers = {\"accept\": \"application/json\", \"content-type\": \"application/json\"}\n return self.perform_request( # type: ignore[return-value]\n \"POST\", __path, params=__query, headers=__headers, body=__body\n )",
"def close(conn, cursor):\n conn.commit()\n cursor.close()\n conn.close()",
"def close_connection(self):\n if self.cursor is None and self.database is None:\n # if we don't have an open connection, do nothing\n return\n self.cursor.close()\n self.database.close()",
"def __del__(self) :\r\n\t\tif self.DB_Connect is not None :\r\n\t\t\tself.DB_Cursor = None\r\n\t\t\tself.DB_Connect.close()\r\n\t\t\tself.DBlock.release()\r\n\t\treturn",
"def close( self ):\n self.__del__()",
"def close(self):\n self.connection.commit()\n self.cursor.close()\n self.connected = False",
"def _closeConnection(cursor, db):\n cursor.commit()\n cursor.close()\n db.close()",
"def close(self):\r\n debug.write(\"[SourceRPG] handling SQL close\", 1)\r\n self.cursor.close()\r\n self.connection.close()\r\n debug.write(\"[SourceRPG] SQL close handled\", 1)",
"def close(self):\n self.__exit__(None, None, None)",
"def close(self):\n self.__exit__(None, None, None)",
"def close_connection(self):\n self.cursor.close()\n self.connection.close()",
"def closeConnection(cnx, cursor):\r\n cnx.commit()\r\n cursor.close()\r\n cnx.close()"
]
| [
"0.8026459",
"0.7676936",
"0.76766557",
"0.76722026",
"0.7462743",
"0.73252213",
"0.7285407",
"0.72512263",
"0.71218365",
"0.70642406",
"0.7033755",
"0.69990253",
"0.6927757",
"0.6773506",
"0.6741317",
"0.67074233",
"0.6701096",
"0.66495186",
"0.6593548",
"0.6579718",
"0.6537352",
"0.6521567",
"0.6520162",
"0.650472",
"0.6480966",
"0.64628744",
"0.6460526",
"0.6460526",
"0.6458995",
"0.6453051"
]
| 0.7831996 | 1 |
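A runnable sketch of the behaviour described in this row, using the standard-library sqlite3 driver: once close() has been called, any further operation on the cursor raises an Error subclass (ProgrammingError in sqlite3):

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.close()

try:
    cur.execute("SELECT 1")
except sqlite3.ProgrammingError as exc:  # sqlite3.ProgrammingError subclasses sqlite3.Error
    print("operation on closed cursor rejected:", exc)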
Merge multiple SDR images into a single HDR image after demosaicing. This is a wrapper function that extracts metadata and calls the appropriate function. | def merge(files, do_align=False, demosaic_first=True, color_space='sRGB', wb=[1, 1, 1],
saturation_percent=0.98, normalize=False):
data = get_metadata(files, color_space, saturation_percent)
if demosaic_first:
HDR, num_sat = imread_demosaic_merge(files, data, do_align, saturation_percent)
else:
HDR, num_sat = imread_merge_demosaic(files, data, do_align)
if num_sat > 0:
logger.warning(f'{num_sat/(data["h"]*data["w"]):.3f}% of pixels (n={num_sat}) are ' \
'saturated in the shortest exposure. The values for these pixels will ' \
'be inaccurate.')
if HDR.min() < 0:
logger.info('Clipping negative pixels.')
HDR[HDR < 0] = 0
assert len(wb) == 3, 'Provide list [R G B] corresponding to white patch in the image'
HDR = HDR * np.array(wb)[None, None, :]
if normalize:
HDR = HDR / HDR.max()
return HDR.astype(np.float32) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_datasets(dslist):\n # We use a variant of our fast stitching routine\n # So first create a sorted list of angles and source files\n container = []\n print 'Passed %d datasets for merging ' % len(dslist)\n proc_info = \"\"\"This dataset was created by collating points from multiple datasets. Data reduction \n information for the individual source datasets is as follows:\"\"\"\n title_info = \"Merge:\"\n for num,dataset in enumerate(dslist):\n storage_info = zip(dataset.axes[0],dataset.storage,dataset.var.storage)\n container.extend(storage_info)\n proc_info += \"\\n\\n===Dataset %s===\\n\" % str(dataset.title)\n try:\n proc_info += dataset.harvest_metadata(\"CIF\")[\"_pd_proc_info_data_reduction\"]\n except KeyError:\n pass\n title_info = title_info + dataset.title + ':'\n # So we have a list of angle,intensity,variance triples which we sort on angle\n container = sorted(container, key=lambda(angle,intensity,variance):angle)\n angles = map(lambda (a,b,c):a,container)\n intensities = map(lambda (a,b,c):b,container)\n variances = map(lambda (a,b,c):c,container)\n rs = Dataset(intensities)\n rs.var = variances\n rs.axes[0] = angles\n rs.axes[0].title = 'Two theta (degrees)'\n rs.title = title_info\n # Add metadata\n AddCifMetadata.add_standard_metadata(rs)\n rs.add_metadata(\"_pd_proc_info_data_reduction\",proc_info,\"CIF\")\n return rs",
"def imread_demosaic_merge(files, metadata, do_align, sat_percent):\n\n\tlogger.info('Demosaicing before merging.')\n\t\n\t# Check for saturation in shortest exposure\n\tshortest_exposure = np.argmin(metadata['exp'] * metadata['gain'] * metadata['aperture'])\n\tlogger.info(f'Shortest exposure is {shortest_exposure}')\n\n\tif do_align:\n\t\tref_idx = np.argsort(metadata['exp'] * metadata['gain']\n\t\t\t\t\t\t\t * metadata['aperture'])[len(files)//2]\n\t\tref_img = io.imread(files[ref_idx]) / metadata['exp'][ref_idx] \\\n\t\t\t\t\t\t\t\t\t\t\t/ metadata['gain'][ref_idx] \\\n\t\t\t\t\t\t\t\t\t\t\t/ metadata['aperture'][ref_idx]\n\n\tnum_saturated = 0\n\tnum, denom = np.zeros((2, metadata['h'], metadata['w'], 3))\n\tfor i, f in enumerate(tqdm.tqdm(files)):\n\t\traw = rawpy.imread(f)\n\t\timg = io.imread_libraw(raw, color_space=metadata['color_space'])\n\t\tif do_align and i != ref_idx:\n\t\t\tscaled_img = img / metadata['exp'][i] \\\n\t\t\t\t\t\t\t / metadata['gain'][i] \\\n\t\t\t\t\t\t\t / metadata['aperture'][i]\n\t\t\timg = align(ref_img, scaled_img, img)\n\n\t\t# Ignore saturated pixels in all but shortest exposure\n\t\tif i == shortest_exposure:\n\t\t\tunsaturated = np.ones_like(img, dtype=bool)\n\t\t\tnum_sat = np.count_nonzero(np.logical_not(\n\t\t\t\tget_unsaturated(raw.raw_image_visible, metadata['saturation_point'],\n\t\t\t\t\t\t\t\timg, sat_percent))) / 3\n\t\telse:\n\t\t\tunsaturated = get_unsaturated(raw.raw_image_visible, metadata['saturation_point'],\n\t\t\t\t\t\t\t\t\t\t img, sat_percent)\n\t\tX_times_t = img / metadata['gain'][i] / metadata['aperture'][i]\n\t\tdenom[unsaturated] += metadata['exp'][i]\n\t\tnum[unsaturated] += X_times_t[unsaturated]\n\n\tHDR = num / denom\n\n\treturn HDR, num_sat",
"def combine_sides(imgID_list_blue, imgID_list_red, output=None, splot='yes'):\r\n \r\n blue_files = ['blue{:04d}_flux.spec.fits'.format(imgID) for imgID in imgID_list_blue]\r\n red_files = ['red{:04d}_flux.spec.fits'.format(imgID) for imgID in imgID_list_red]\r\n blue_err_files = ['blue{:04d}_flux.err.fits'.format(imgID) for imgID in imgID_list_blue]\r\n red_err_files = ['red{:04d}_flux.err.fits'.format(imgID) for imgID in imgID_list_red]\r\n\r\n\r\n if output is None:\r\n hdr = pyfits.getheader(blue_files[0])\r\n obj = hdr['OBJECT'].replace(' ', '_')\r\n # create a unique name based on the input\r\n\r\n def ids_to_string(idlist):\r\n \"\"\"Create hyphen-delimited string from list of ints\"\"\"\r\n if len(idlist) == 1:\r\n return \"{:d}\".format(idlist[0])\r\n else:\r\n return \"-\".join([\"{:d}\".format(id) for id in idlist])\r\n\r\n output = obj + '_' + \\\r\n ids_to_string(imgID_list_blue) + '+' + \\\r\n ids_to_string(imgID_list_red) \r\n\r\n # clobber the old output files if they exist\r\n iraf.delete(output+'.*.fits', verify='no')\r\n iraf.delete(output+'.*.txt', verify='no')\r\n \r\n # determine dispersion: downsample to lower-resolution spectrum\r\n hdr = pyfits.getheader(blue_files[0])\r\n dw_blue = hdr['CDELT1']\r\n hdr = pyfits.getheader(red_files[0])\r\n dw_red = hdr['CDELT1']\r\n dw = np.max([dw_blue, dw_red])\r\n\r\n # find wavelength ranges\r\n def wavelength_range(fits_list):\r\n \"\"\"Given an input list of fits spectra from extract1D, \r\n return the absolute minimum and maximum wavelength ranges\"\"\"\r\n mins = []\r\n maxes = []\r\n for fname in fits_list:\r\n spec = np.genfromtxt(fname.replace('fits', 'txt'), names='wave, flux', \r\n dtype='f4, f4')\r\n mins.append(spec['wave'].min())\r\n maxes.append(spec['wave'].max())\r\n return [np.array(mins).min(), np.array(maxes).max()]\r\n\r\n blue_range = wavelength_range(blue_files)\r\n red_range = wavelength_range(red_files)\r\n\r\n # find overlap region\r\n if red_range[0] >= blue_range[1]:\r\n raise ValueError('No overlap in wavelength solution between sides!')\r\n\r\n # specify total spectral range\r\n w1 = blue_range[0]\r\n w2 = red_range[1]\r\n\r\n # re-disperse to common wavelength solution\r\n def redisperse_list(files,dw,w1,w2,key='spec'):\r\n \"\"\"Re-disperse a list of spectra.\r\n\r\n Wraps iraf.dispcor.\r\n\r\n Parameters\r\n ----------\r\n files : list of strings\r\n Files to disperse\r\n dw : int\r\n rs Spectral dispersion [Angstroms per pixel]\r\n w1 : int\r\n Minimum wavelength [Angstrom]\r\n w2 : int\r\n Maximum wavelength [Angstrom]\r\n key : {'spec' (default) or 'err'}\r\n Whether the files are spectra or uncertainties.\r\n \"\"\"\r\n input_list = ','.join(files)\r\n disp_files = [f.replace(key, key+'-disp') for f in files]\r\n output_disp_list = ','.join(disp_files)\r\n iraf.unlearn('dispcor')\r\n iraf.dispcor.input = input_list\r\n iraf.dispcor.output = output_disp_list\r\n # keep existing wavelength endpoints\r\n iraf.dispcor.dw = dw\r\n iraf.dispcor.w1 = w1\r\n iraf.dispcor.w2 = w2\r\n iraf.dispcor.flux = 'no'\r\n iraf.dispcor()\r\n # write text files\r\n for output in disp_files:\r\n iraf.wspectext(output, output.replace('fits', 'txt'), header=\"no\")\r\n\r\n return disp_files\r\n\r\n # delete any lingering files\r\n iraf.delete('*-disp.fits', verify='no')\r\n iraf.delete('*-disp.txt', verify='no')\r\n\r\n blue_files_redisp = redisperse_list(blue_files, dw, w1, w2)\r\n red_files_redisp = redisperse_list(red_files, dw, w1, w2)\r\n blue_err_files_redisp = redisperse_list(blue_err_files, dw, w1, w2, 
key='err')\r\n red_err_files_redisp = redisperse_list(red_err_files, dw, w1, w2, key='err')\r\n\r\n # combine individual sides\r\n coadd_spectra(blue_files_redisp, 'tmp-blue')\r\n coadd_spectra(red_files_redisp, 'tmp-red')\r\n\r\n # find optimum weighting between sides\r\n\r\n # combine sides, weighted by uncertainties\r\n coadd_spectra(['tmp-blue.spec.fits', 'tmp-red.spec.fits'], output,\r\n one_side=False)\r\n\r\n # clean up\r\n iraf.delete('*-disp.fits', verify='no')\r\n iraf.delete('*-disp.txt', verify='no')\r\n iraf.delete('tmp-*.fits', verify='no')\r\n iraf.delete('tmp-*.txt', verify='no')\r\n \r\n if splot == 'yes':\r\n iraf.splot(output+'.spec')",
"def combine_sides_scombine(imgID_list_blue, imgID_list_red, output=None, splot='yes'):\r\n\r\n \r\n blue_files = ['blue{:04d}_flux.spec.fits'.format(imgID) for imgID in imgID_list_blue]\r\n red_files = ['red{:04d}_flux.spec.fits'.format(imgID) for imgID in imgID_list_red]\r\n\r\n input_blue_list = ','.join(blue_files)\r\n\r\n if output is None:\r\n hdr = pyfits.getheader(blue_files[0])\r\n obj = hdr['OBJECT'].replace(' ', '_')\r\n # create a unique name based on the input\r\n\r\n def ids_to_string(idlist):\r\n if len(idlist) == 1:\r\n return \"{:d}\".format(idlist[0])\r\n else:\r\n return \"-\".join([\"{:d}\".format(id) for id in idlist])\r\n\r\n output = obj + '_' + \\\r\n ids_to_string(imgID_list_blue) + '+' + \\\r\n ids_to_string(imgID_list_red) + '.fits'\r\n\r\n # clobber the old output file\r\n iraf.delete(output, verify='no')\r\n iraf.delete(output.replace('fits', 'txt'), verify='no')\r\n \r\n # determine dispersion: downsample to lower-resolution spectrum\r\n hdr = pyfits.getheader(blue_files[0])\r\n dw_blue = hdr['CDELT1']\r\n hdr = pyfits.getheader(red_files[0])\r\n dw_red = hdr['CDELT1']\r\n dw = np.max([dw_blue, dw_red])\r\n\r\n # cut off blue side redder than 5500\r\n trim_files = [f.replace('spec', 'trim') for f in blue_files]\r\n output_trim_list = ','.join(trim_files)\r\n iraf.unlearn('dispcor')\r\n iraf.dispcor.input = input_blue_list\r\n iraf.dispcor.output = output_trim_list\r\n # wavelength solution\r\n iraf.dispcor.w1 = 3800\r\n iraf.dispcor.w2 = 5500\r\n iraf.dispcor.dw = dw\r\n iraf.dispcor.flux = 'no'\r\n iraf.dispcor()\r\n\r\n all_files = trim_files + red_files\r\n input_file_list = ','.join(all_files)\r\n\r\n iraf.unlearn('scombine')\r\n iraf.scombine.input = input_file_list\r\n iraf.scombine.output = output\r\n iraf.scombine.combine = 'average'\r\n iraf.scombine.group = 'all'\r\n iraf.scombine.first = 'no'\r\n iraf.scombine.dw = dw\r\n iraf.scombine.scale = 'none'\r\n # attempt to join sides smoothly.\r\n # iraf.scombine.zero = 'median'\r\n # iraf.scombine.sample = ''\r\n iraf.scombine()\r\n\r\n iraf.wspectext(output, output.replace('fits', 'txt'), header=\"no\")\r\n\r\n # clean up\r\n iraf.delete('blue*.trim.fits', verify='no')\r\n \r\n if splot == 'yes':\r\n iraf.splot(output)",
"def checksImages(self):\n metadata=[]\n for image in self.meta['sources']:\n with rasterio.open(image) as src:\n metaData=src.meta\n \n assert metaData['driver'] == 'GTiff', \"Driver is not supported: {0}\".format(metaData['driver'])\n assert metaData['count'] == len(self.meta['bandNames']), \"Nbands incorrect, expected: {0}, {1} provided\".format(metaData['count'],len(self.meta['bandNames']))\n \n metadata.append({'dtype': metaData['dtype'], 'driver': metaData['driver'], 'nodata': metaData['nodata'], 'nBands': metaData['count'],'crs': src.crs.to_string()})\n \n assert len(set([item['dtype'] for item in metadata])) == 1, \"Images list dtypes aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['dtype'] for item in metadata])))\n assert len(set([item['driver'] for item in metadata])) == 1, \"Images list drivers aren't compatibles. Expected: 1, 1 provided\".format(metaData['count'],len(set([item['driver'] for item in metadata])))\n assert len(set([item['nodata'] for item in metadata])) == 1, \"Images list nodata values aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nodata'] for item in metadata])))\n assert len(set([item['nBands'] for item in metadata])) == 1, \"Images list nBands number aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['nBands'] for item in metadata])))\n assert len(set([item['crs'] for item in metadata])) == 1, \"Images list crs aren't compatibles. Expected: 1, {1} provided\".format(metaData['count'],len(set([item['crs'] for item in metadata]))) \n return metadata[0]",
"def get_hdr(patient_dir,filenames,get_single=False):\n\n hdr_lis = []\n\n if get_single:\n\n for filename in os.listdir(patient_dir):\n if filename.endswith('.hdr'):\n\n\n print(filename)\n hdr = load_hdr(os.path.join(patient_dir,filename))\n hdr = rescale_volume(hdr,old_dimension=[61,73,61],new_dimension=(64,64,64),target_dimension=[64,64,64])\n hdr = normalize(hdr)\n print(hdr.shape)\n hdr_lis.append(hdr)\n\n return hdr_lis\n\n\n else:\n hdr_lis = []\n hdr_stacked = np.empty((len(filenames),64,64,64))\n for row in filenames:\n print(row)\n # hdr_filename = row['filename']\n hdr = load_hdr(os.path.join(patient_dir,row))\n hdr = rescale_volume(hdr,old_dimension=[61,73,61],new_dimension=(64,64,64),target_dimension=[64,64,64])\n hdr = normalize(hdr)\n hdr_lis.append(hdr)\n print(len(hdr_lis))\n for index, hdr in enumerate(hdr_lis):\n hdr_stacked[index,:,:,:] = hdr\n\n print('Stacked hdr shape {}'.format(hdr_stacked.shape))\n\n return hdr_stacked",
"def combine_images(args):\n\n # Read all images into a cube (TODO: think about the RAM)\n with fits.open(args.input[0]) as im0:\n lx, ly = im0[0].data.shape\n ref_hdr = im0[0].header\n\n headers = [fits.open(im_name)[0].header for im_name in args.input]\n cube = numpy.ma.zeros((len(args.input), lx, ly))\n cube.mask = numpy.zeros_like(cube.data)\n for ii, im_name in enumerate(args.input):\n with astroim.Astroim(im_name) as im:\n cube.data[ii, :,:] = im.chips[0].data\n if im.chips[0].mask is not None:\n cube.mask[ii,:,:] = im.chips[0].mask\n\n # Scale images\n scale_functions = {\"median\": numpy.ma.median,\n \"mean\": numpy.ma.mean,\n \"mode\": scipy.stats.mstats.mode,\n \"none\": lambda x: 1}\n for ii, im_name in enumerate(args.input):\n func = scale_functions[args.scale.lower()]\n cube[ii,:,:] /= func(cube[ii,:,:])\n\n\n # Reproject all images to the ref_hdr\n for ii, _ in enumerate(args.input):\n if ii == 0:\n continue\n cube.data[ii,:,:], footprint = reproject_interp((cube.data[ii,:,:], headers[ii]), ref_hdr)\n cube.mask[ii,:,:], footprint = reproject_interp((cube.mask[ii,:,:], headers[ii]), ref_hdr)\n #whr = numpy.isnan(cube.data[ii,:,:])\n #cube.mask[ii,:,:][whr] = True\n\n # Do average\n average_functions = {\"median\": numpy.ma.median, \"mean\": numpy.ma.mean, \"sum\": numpy.ma.sum}\n func = average_functions[args.average.lower()]\n final_image = func(cube, axis=0)\n ref_hdr[\"NCOMBINE\"] = len(args.input)\n\n mask_name = utilities.replace_extension(args.output, \".fits.msk\")\n mask_name_header = utilities.replace_extension(os.path.basename(args.output), \".fits.msk\")\n ref_hdr[\"MASK\"] = mask_name_header\n fits.writeto(args.output, final_image.data, ref_hdr, clobber=True )\n fits.writeto(mask_name, numpy.array(final_image.mask, dtype=int), clobber=True)\n\n return args.output",
"def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):\n # Detect keypoints in each image\n keypoints = [] # keypoints[i] corresponds to imgs[i]\n for img in imgs:\n kypnts = corner_peaks(harris_corners(img, window_size=3),\n threshold_rel=0.05,\n exclude_border=8)\n keypoints.append(kypnts)\n # Describe keypoints\n descriptors = [] # descriptors[i] corresponds to keypoints[i]\n for i, kypnts in enumerate(keypoints):\n desc = describe_keypoints(imgs[i], kypnts,\n desc_func=desc_func,\n patch_size=patch_size)\n descriptors.append(desc)\n # Match keypoints in neighboring images\n matches = [] # matches[i] corresponds to matches between\n # descriptors[i] and descriptors[i+1]\n for i in range(len(imgs)-1):\n mtchs = match_descriptors(descriptors[i], descriptors[i+1], 0.7)\n matches.append(mtchs)\n\n ### YOUR CODE HERE\n raise NotImplementedError() # Delete this line\n ### END YOUR CODE\n\n return panorama",
"def sum_images(filelist):\n\n nfiles = np.size(filelist)\n\n print(\"Summing together {} files\".format(nfiles))\n\n ims = []\n\n for fname in filelist:\n hdu = f.open(fname)\n ims.append(hdu[0].data)\n\n ims = np.array(ims)\n\n sum_im = np.nansum(ims, axis=0)\n hdu[0].data = sum_im\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"Summed up the following images:\"\n\n for fname in filelist:\n hdu[0].header['HISTORY'] = fname\n\n hdu[0].header['HISTORY'] = \"######################\"\n\n outname = filelist[-1].split('.')[0]+'_summed.fits'\n\n print(\"Writing out final file to {}\".format(outname))\n\n hdu.writeto(outname, overwrite=True)",
"def imread_merge_demosaic(files, metadata, do_align, pattern='RGGB'):\n\n\tif do_align:\n\t\tref_idx = np.argsort(metadata['exp'] * metadata['gain']\n\t\t\t\t\t\t\t * metadata['aperture'])[len(files)//2]\n\t\tref_img = io.imread(files[ref_idx]) / metadata['exp'][ref_idx] \\\n\t\t\t\t\t\t\t\t\t\t\t/ metadata['gain'][ref_idx] \\\n\t\t\t\t\t\t\t\t\t\t\t/ metadata['aperture'][ref_idx]\n\n\tlogger.info('Merging before demosaicing.')\n\traw = rawpy.imread(files[0])\n\t\n\t# More transforms available here:\n\t# http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html\n\tif metadata['color_space'] == 'raw':\n\t\tcolor_mat = np.eye(3)\n\telse:\n\t\tassert (raw.rgb_xyz_matrix[-1] == 0).all()\n\t\tnative2xyz = np.linalg.inv(raw.rgb_xyz_matrix[:-1])\n\n\t\tif metadata['color_space'] == 'xyz':\n\t\t\txyz2out = np.eye(3)\n\t\telif metadata['color_space'] == 'srgb':\n\t\t\txyz2out = np.array([[3.2406, -1.5372, -0.4986],\n\t\t\t\t\t\t\t\t[-0.9689, 1.8758, 0.0415],\n\t\t\t\t\t\t\t\t[0.0557, -0.2040, 1.0570]])\n\t\telif metadata['color_space'] == 'adobe':\n\t\t\txyz2out = np.array([[2.0413690, -0.5649464, -0.3446944],\n\t\t\t\t\t\t\t\t[-0.9692660, 1.8760108, 0.0415560],\n\t\t\t\t\t\t\t\t[0.0134474, -0.1183897, 1.0154096]])\n\t\telse:\n\t\t\tlogger.warning('Unsupported color-space, switching to camara raw.')\n\t\t\tnative2xyz = np.eye(3)\n\t\t\txyz2out = np.eye(3)\n\t\tcolor_mat = (xyz2out @ native2xyz).transpose()\n\n\t# Check for saturation in shortest exposure\n\tshortest_exposure = np.argmin(metadata['exp'] * metadata['gain'] * metadata['aperture'])\n\tlogger.info(f'Shortest exposure is {shortest_exposure}')\n\n\tnum_saturated = 0\n\tnum, denom = np.zeros((2, metadata['h'], metadata['w']))\n\tblack_frame = np.tile(metadata['black_level'].reshape(2, 2),\n\t\t\t\t\t\t (metadata['h']//2, metadata['w']//2))\n\tfor i, f in enumerate(tqdm.tqdm(files)):\n\t\timg = io.imread(f, libraw=False).astype(np.float32)\n\t\tif do_align and i != ref_idx:\n\t\t\tscaled_img = io.imread(f).astype(np.float32) / metadata['exp'][i] \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t / metadata['gain'][i] \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t / metadata['aperture'][i]\n\t\t\timg = align(ref_img, scaled_img, img)\n\n\t\t# Ignore saturated pixels in all but shortest exposure\n\t\tif i == shortest_exposure:\n\t\t\tunsaturated = np.ones_like(img, dtype=bool)\n\t\t\tnum_sat = np.count_nonzero(np.logical_not(get_unsaturated(\n\t\t\t\timg, metadata['saturation_point'])))\n\t\telse:\n\t\t\tunsaturated = get_unsaturated(img, metadata['saturation_point'])\n\t\t\n\t\t# Subtract black level for linearity\n\t\timg -= black_frame\n\n\t\tX_times_t = img / metadata['gain'][i] / metadata['aperture'][i]\n\t\tdenom[unsaturated] += metadata['exp'][i]\n\t\tnum[unsaturated] += X_times_t[unsaturated]\n\t\t\n\tHDR_bayer = num / denom\n\n\t# Libraw does not support 32-bit values. Use colour-demosaicing instead:\n\t# https://colour-demosaicing.readthedocs.io/en/latest/manual.html\n\tlogger.info('Running bilinear demosaicing')\n\timport colour_demosaicing as cd\n\tHDR = cd.demosaicing_CFA_Bayer_bilinear(HDR_bayer, pattern=pattern)\n\n\t# Convert to output color-space\n\tlogger.info(f'Using color matrix: {color_mat}')\n\tHDR = HDR @ color_mat\n\n\treturn HDR, num_sat",
"def write_images(band,skypos,tranges,skyrange,write_cnt=False,write_int=False,write_rr=False,framesz=0,width=False,height=False,verbose=0,tscale=1000.,memlight=False,coadd=False,response=False,calpath='../cal/',clobber=False,retries=20):\n\t# No files were requested, so don't bother doing anything.\n\tif not (write_cnt or write_int or write_rr):\n\t\treturn\n\tcount,rr,intensity=create_images(band,skypos,tranges,skyrange,framesz=framesz,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,coadd=coadd,response=response,calpath=calpath,retries=retries)\n\n\t# Add a conditional so that this is only created for multi-frame images\n\ttbl = movie_tbl(band,tranges,framesz=framesz,verbose=verbose,retries=retries)\n\n\tif write_cnt:\n\t\thdu = pyfits.PrimaryHDU(count)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing count image to '+str(write_cnt)\n\t\thdulist.writeto(write_cnt,clobber=clobber)\n\tif write_rr:\n\t\thdu = pyfits.PrimaryHDU(rr)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing response image to '+str(write_rr)\n hdulist.writeto(write_rr,clobber=clobber)\n\tif write_int:\n\t\thdu = pyfits.PrimaryHDU(intensity)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing intensity image to '+str(write_int)\n\t\thdulist.writeto(write_int,clobber=clobber)\n\n\treturn",
"def from_multiple_images(\n cls,\n path_list: Iterable[str | Path],\n dtype: np.dtype = np.uint16,\n **kwargs,\n ):\n obj = cls.from_demo_image()\n # save a combined image to a temporary dir, then load it back in as a PFDicomImage\n with TemporaryDirectory() as tmp:\n filename = osp.join(tmp, \"mydcm.dcm\")\n image.load_multiples(path_list, dtype=dtype, **kwargs).save(filename)\n obj.image = PFDicomImage(filename)\n return obj",
"def merge_spectra(datalist):\n resolution = get_resolution(datalist[0])\n concat = np.concatenate(datalist)\n # xvals = concat[:, 0]\n # print \"Median Resolution:\", resolution\n # axis = nonlinear_axis(np.amin(concat[:, 0]), np.amax(concat[:, 0]), resolution)\n axis = ud.nonlinear_axis(np.amin(concat[:, 0]), np.amax(concat[:, 0]), resolution)\n template = np.transpose([axis, np.zeros_like(axis)])\n print \"Length merge axis:\", len(template)\n for d in datalist:\n if len(d) > 1:\n newdat = ud.mergedata(template, d)\n # newdat=ud.lintegrate(d,axis)\n template[:, 1] += newdat[:, 1]\n return template",
"def concatenate_images(img_1, img_2):\n res_4 = None;\n if not (img_1 is None):\n # Resize Camera and Satellite Image:\n res_1 = cv2.resize(img_2, None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)\n res_2 = cv2.resize(img_1, None,fx=0.7, fy=0.7, interpolation = cv2.INTER_CUBIC)\n\n #Concatenate Camera and Satellite view on single image\n h_1 = res_1.shape[0];\n w_1 = res_1.shape[1];\n h_2 = res_2.shape[0];\n w_2 = res_2.shape[1];\n scale = float(h_1)/float(h_2);\n\n h_2 = h_1;\n w_2 = int(w_2*scale)\n dim = (w_2, h_2);\n res_3 = cv2.resize(res_2, dim, interpolation = cv2.INTER_CUBIC)\n\n res_4 = np.concatenate((res_1, res_3), axis=1)\n\n return res_4;",
"def merge_chips(images_files, *, win_bounds):\n datasets = [rasterio.open(p) for p in images_files]\n img, _ = rasterio.merge.merge(datasets, bounds=win_bounds, method=mean_merge_method)\n for ds in datasets:\n ds.close()\n return img",
"def _writeCombinedImage(self, array, filename):\n\n _fname = self.assoc.parlist[0]['outsingle']\n _file = pyfits.open(_fname, mode='readonly')\n _prihdu = pyfits.PrimaryHDU(header=_file[0].header,data=array)\n\n _pf = pyfits.HDUList()\n _pf.append(_prihdu)\n _pf.writeto(filename)\n\n _file.close()\n del _pf",
"def merge(file_list, tz_list=None):\n from sonde.formats.merge import MergeDataset\n\n if tz_list is None:\n tz_list = [default_static_timezone for fn in file_list]\n elif tz_list == 'auto':\n tz_list = ['auto' for fn in file_list]\n #else:\n # tz_list = [UTCStaticOffset(int(tz.lower().strip('utc')))\n # for tz in tz_list]\n\n metadata = dict()\n data = dict()\n\n metadata['dates'] = np.empty(0, dtype=datetime.datetime)\n metadata['data_file_name'] = np.empty(0, dtype='|S100')\n metadata['instrument_serial_number'] = np.empty(0, dtype='|S15')\n metadata['instrument_manufacturer'] = np.empty(0, dtype='|S15')\n\n for param, unit in master_parameter_list.items():\n data[param] = np.empty(0, dtype='<f8') * unit[-1]\n\n for file_name, tz in zip(file_list, tz_list):\n try:\n if tz == 'auto':\n tmp = Sonde(file_name)\n # tz = UTCStaticOffset(utc_offset)\n tz = find_tz(tmp.setup_time)\n elif isinstance(tz, str):\n tz = UTCStaticOffset(int(tz.lower().strip('utc')))\n dataset = Sonde(file_name, tzinfo=tz)\n except:\n warnings.warn('merged failed for file %s with error: %s' % (file_name, traceback.print_exc()), Warning)\n continue\n\n fn_list = np.zeros(len(dataset.dates), dtype='|S100')\n sn_list = np.zeros(len(dataset.dates), dtype='|S15')\n m_list = np.zeros(len(dataset.dates), dtype='|S15')\n\n fn_list[:] = os.path.split(file_name)[-1]\n sn_list[:] = dataset.serial_number\n m_list[:] = dataset.manufacturer\n\n metadata['dates'] = np.hstack((metadata['dates'], dataset.dates))\n metadata['data_file_name'] = np.hstack(\n (metadata['data_file_name'], fn_list))\n metadata['instrument_serial_number'] = np.hstack(\n (metadata['instrument_serial_number'], sn_list))\n metadata['instrument_manufacturer'] = np.hstack(\n (metadata['instrument_manufacturer'], m_list))\n\n no_data = np.zeros(len(dataset.dates))\n no_data[:] = np.nan\n for param in master_parameter_list.keys():\n if param in dataset.data.keys():\n tmp_data = dataset.data[param]\n else:\n tmp_data = no_data\n\n data[param] = np.hstack((data[param], tmp_data))\n print 'merged: %s' % file_name\n\n for param, unit in master_parameter_list.items():\n if np.all(np.isnan(data[param])):\n del data[param]\n else:\n data[param] = data[param] * unit[-1]\n\n return MergeDataset(metadata, data)",
"def merge(images, size, c_dim):\n h, w = images.shape[1], images.shape[2]\n \n img = np.zeros((h*size[0], w*size[1], c_dim))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j * h : j * h + h,i * w : i * w + w, :] = image\n #cv2.imshow(\"srimg\",img)\n #cv2.waitKey(0)\n \n return img",
"def main(vis_dirs, outdir):\n assert len(vis_dirs) == 4\n\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n for i, filename in enumerate(tqdm(os.listdir(vis_dirs[-1]))):\n # if i % 100 == 0:\n # print(i)\n\n files = [os.path.join(vis_dir, filename) for vis_dir in vis_dirs]\n outimg = os.path.join(outdir, filename)\n merge_four_images(files, outimg)\n\n print (\"Finished! Result dir is %s\" % outdir)",
"def merge_one_sensor(slist):\n r = strip_file(slist[0],leave_header=True)\n for s in slist[1:]:\n r += strip_file(s,leave_header=False)\n return r",
"def img_series_stats(image_ccd_lst,plots_path,obsdate):\n median_count = []\n mean_count = []\n \n source_hdu = CCDData(image_ccd_lst[0],unit='adu')\n source_image_data = source_hdu.data.astype(float) \n source_image_hdr = source_hdu.header\n target_name = source_image_hdr['FIELD'].strip(' ')\n exptime = source_image_hdr['EXPTIME']\n chip_num = source_image_hdr['CHIP']\n \n for a_file in image_ccd_lst:\n hdu = CCDData(a_file,unit='adu')\n image_data = hdu.data.astype(float) \n image_hdr = hdu.header\n \n median_count.append(np.median(a_file))\n mean_count.append(np.mean(a_file))\n \n min_count_for_median = np.min(median_count)\n min_count_for_mean = np.min(mean_count)\n max_count_for_median = np.max(median_count)\n max_count_for_mean = np.max(mean_count)\n \n plt.figure()\n plt.plot(mean_count, label='mean',color=\"palevioletred\")\n plt.axhline(y=min_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='min mean {:.2f}'.format(min_count_for_mean),alpha=1)\n plt.axhline(y=max_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='max mean {:.2f}'.format(max_count_for_mean),alpha=1)\n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Mean pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_mean.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()\n\n plt.figure()\n plt.plot(median_count, label='median',color=\"darkviolet\")\n plt.axhline(y=min_count_for_median,linestyle='-',linewidth=0.5,color='red',label='min median {:.2f}'.format(min_count_for_median),alpha=1)\n plt.axhline(y=max_count_for_median,linestyle='-',linewidth=0.5,color='red',label='max median {:.2f}'.format(max_count_for_median),alpha=1) \n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Median pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_median.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()",
"def associate_files(self):\n # Open starinfo file and define structured array\n starinfo_file = self.starinfo_file\n nstar = sum(1 for line in open(starinfo_file))\n infoname = ['obj', 'std', 'caldir', 'altname']\n infofmt = ['|S25', '|S25', '|S25', '|S25']\n starinfo = np.zeros(nstar, dtype={\n 'names': infoname, 'formats': infofmt})\n with open(starinfo_file, 'r') as arq:\n for i in range(nstar):\n linelist = arq.readline().split()\n for j in range(len(infoname)):\n starinfo[i][j] = linelist[j]\n\n if self.stored_sens:\n self.load_storedsens()\n\n os.chdir(self.raw_dir)\n\n l = glob.glob('*.fits')\n l.sort()\n\n headers = []\n headers_ext1 = []\n for i in l:\n try:\n headers.append(fits.getheader(i, ext=0))\n headers_ext1.append(fits.getheader(i, ext=1))\n except IOError:\n print('IOError reading file {:s}.'.format(i))\n raise SystemExit(0)\n\n oversc = np.array(\n [('overscan') in i for i in headers_ext1], dtype='bool')\n\n mjds = np.array([i['mjd-obs'] for i in headers_ext1], dtype='float32')\n idx = np.arange(len(l))\n\n images = np.array([\n l[i] for i in idx if (\n (headers[i]['obstype'] == 'OBJECT') &\n (headers[i]['object'] != 'Twilight') &\n (headers[i]['obsclass'] != 'acq'))])\n\n field_names = [\n 'filename', 'observatory', 'instrument', 'detector',\n 'grating', 'filter1', 'obsclass', 'object', 'obstype',\n 'grating_wl', 'overscan', 'mjd', 'ccdsum']\n types = [\n 'S120', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60',\n 'float32', 'bool', 'float32', 'S60']\n hdrkeys = [\n 'observat', 'instrume', 'detector', 'grating', 'filter1',\n 'obsclass', 'object', 'obstype', 'grwlen']\n\n hdrpars_type = [\n (field_names[i], types[i]) for i in range(len(field_names))]\n\n hdrpars = np.array([\n ((l[i],) + tuple([headers[i][j] for j in hdrkeys]) +\n (oversc[i],) + (mjds[i],) + (headers_ext1[i]['ccdsum'],))\n for i in idx], dtype=hdrpars_type)\n\n associated = []\n\n for i, j in enumerate(images):\n\n # Take great care when changing this.\n hdr = fits.getheader(j, ext=0)\n hdr_ext1 = fits.getheader(j, ext=1)\n mjd = hdr_ext1['mjd-obs']\n\n element = {\n 'image': j, 'observatory': hdr['observat'],\n 'instrument': hdr['instrume'],\n 'detector': hdr['detector'], 'grating_wl': hdr['grwlen'],\n 'mjd': mjd, 'grating': hdr['grating'],\n 'filter1': hdr['filter1'], 'obsclass': hdr['obsclass'],\n 'object': hdr['object']}\n\n if self.stored_sens:\n ssf = self.stored_sensfunc\n element['standard_star'] = ssf['filename'][\n (ssf['observatory'] == hdr['observat']) &\n (ssf['detector'] == hdr['detector']) &\n (ssf['grating'] == hdr['grating']) &\n (ssf['instrument'] == hdr['instrume']) &\n (ssf['filter1'] == hdr['filter1']) &\n (ssf['maskname'] == hdr['maskname'])]\n else:\n element['standard_star'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'OBJECT') &\n (np.array([k in ['partnerCal', 'progCal']\n for k in hdrpars['obsclass']], dtype='bool')) &\n (hdrpars['object'] != 'Twilight') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['filter1'] == hdr['filter1']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'stdstar_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'stdstar_ttol'))]\n\n element['flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (hdrpars['detector'] == hdr['detector']) &\n 
(abs(mjds - mjd) <= self.cfg.getfloat('associations',\n 'flat_ttol'))]\n\n element['twilight'] = hdrpars['filename'][\n (hdrpars['object'] == 'Twilight') &\n (hdrpars['obstype'] == 'OBJECT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'twilight_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'twilight_ttol'))]\n\n c = 'twilight'\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 1:\n element[c] = element[c][0]\n elif len(element[c]) == 0:\n element[c] = ''\n\n # A flat close to the twilight observation for a better\n # response function.\n if element['twilight']:\n twipars = hdrpars[hdrpars['filename'] == element['twilight']]\n element['twilight_flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == twipars['observatory']) &\n (hdrpars['detector'] == twipars['detector']) &\n (hdrpars['grating'] == twipars['grating']) &\n (hdrpars['grating_wl'] == twipars['grating_wl']) &\n (abs(mjds - twipars['mjd']) <= self.cfg.getfloat(\n 'associations', 'twilight_ttol'))]\n else:\n element['twilight_flat'] = np.array([], dtype='S60')\n\n element['arc'] = hdrpars['filename'][\n # (hdrpars['object'] == 'CuAr') &\n (hdrpars['obstype'] == 'ARC') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'arc_ttol'))]\n\n element['bias'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BIAS') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'bias_ttol')) &\n (\n (hdrpars['overscan'] & (self.fl_over == 'yes')) |\n (~hdrpars['overscan'] & (self.fl_over == 'no'))\n )]\n\n im = fits.open(element['image'])\n ishape = np.array(im[1].data.shape, dtype='float32')\n im.close()\n del(im)\n\n validBiases = np.ones(len(element['bias']), dtype='bool')\n k = 0\n\n for biasImage in element['bias']:\n\n bias = fits.open(biasImage)\n bshape = np.array(bias[1].data.shape, dtype='float32')\n bias.close()\n del(bias)\n\n #\n # Elinates biases if they differ in array size from\n # the science image. 
Small differences are normal due to\n # the overscan subtraction in processed bias frames.\n #\n if np.any(np.abs(bshape / ishape - 1.0) > 0.10):\n validBiases[k] = False\n\n k += 1\n\n element['bias'] = element['bias'][validBiases]\n del(k)\n\n element['bpm'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BPM') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['ccdsum'] == hdr_ext1['ccdsum'])]\n\n categories = ['flat', 'bias', 'arc', 'standard_star',\n 'bpm', 'twilight_flat']\n\n for c in categories:\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 0:\n element[c] = ''\n elif len(element[c]) == 1:\n element[c] = (element[c])[0]\n\n associated.append(element)\n\n # Define mdf filename\n # Based in gprepare.cl\n # Did not account for observation in Nod-and-Shuffle\n for i in associated:\n header_flat = [\n k for j, k in enumerate(headers) if l[j] == i['flat']\n ]\n if len(header_flat):\n header_flat = header_flat[0]\n MaskName = header_flat['maskname']\n if MaskName == \"IFU-2\":\n slits = 'both'\n elif MaskName == \"IFU-B\":\n slits = 'blue'\n elif MaskName == \"IFU-R\":\n slits = 'red'\n i['slits'] = slits\n\n if self.object_filter:\n objs = self.object_filter.split(',')\n sci_ims = [\n i for i in associated if (\n (i['obsclass'] == 'science') &\n (i['object'] in objs))]\n else:\n sci_ims = [i for i in associated if i['obsclass'] == 'science']\n\n if self.all_stars:\n std_ims = [\n i for i in associated if i['obsclass'] in ['partnerCal',\n 'progCal']]\n else:\n used_stds = [i['standard_star'] for i in sci_ims]\n std_ims = [i for i in associated if i['image'] in used_stds]\n\n # Get star info from starinfo.dat\n possible_names = np.concatenate((starinfo['obj'], starinfo['std'],\n starinfo['altname']))\n n_names = len(possible_names)\n\n for i, j in enumerate(possible_names):\n possible_names[i] = (j.lower()).replace(' ', '')\n\n for i in std_ims:\n # Removes the 'standard_star' key if the dictionary\n # element in question refers to a standard star.\n del i['standard_star']\n starname = (i['object'].lower()).replace(' ', '')\n\n try:\n stdstar_idx = (\n np.arange(n_names)[possible_names == starname] %\n (n_names / 3))[0]\n except:\n raise Exception(\n 'Standard star named {:s} not found in file {:s}'.\n format(starname, starinfo_file))\n\n i['stdstar'] = starinfo[stdstar_idx]['std']\n\n if starinfo[stdstar_idx]['caldir'] == 'gireds_data':\n i['caldir'] = pkg_resources.resource_filename(\n 'gireds', 'data/')\n else:\n i['caldir'] = starinfo[stdstar_idx]['caldir']\n\n self.sci = sci_ims\n self.std = std_ims\n\n # Writes the file association dictionary to an ASCII file\n # in the run directory.\n\n if not self.dry_run:\n try:\n os.mkdir(self.products_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n try:\n os.mkdir(self.run_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n\n if not self.dry_run:\n os.chdir(self.run_dir)\n json.dump(\n sci_ims, open('file_associations_sci.dat', 'w'),\n sort_keys=True, indent=4)\n json.dump(\n std_ims, open('file_associations_std.dat', 'w'),\n sort_keys=True, indent=4)",
"def merge_images(sources, targets, batch_size=16):\n _, _, h, w = sources.shape\n row = int(np.sqrt(batch_size))\n merged = np.zeros([3, row*h, row*w*2])\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n merged[:, i*h:(i+1)*h, (j*2)*h:(j*2+1)*h] = s\n merged[:, i*h:(i+1)*h, (j*2+1)*h:(j*2+2)*h] = t\n merged = merged.transpose(1, 2, 0)\n return merged",
"def run():\n\n today = datetime.now().strftime(\"%Y-%m-%d\")\n log_file = os.path.abspath(\"logs/{}.log\".format(today))\n logger = RsmasLogger(\"pipeline\", log_file)\n\n images = get_list_of_images()\n # LOG: list of images to process\n logger.log(loglevel.INFO, [img.key for img in images])\n\n for im in images:\n\n logger.log(loglevel.INFO, \"Processing image: {}\".format(im.key))\n\n file_path = \"{}/{}\".format(im.bucket_name, im.key)\n full_path = \"{}_full.jpg\"\n mod_path = \"{}_mod.jpg\"\n aws_path = \"{}/{}/{}/{}\"\n try:\n haz_id, haz_name, sat_name, sat_dir, img_type, img_date, center = summary.pull_summary_data(\n \"/vsis3/{}\".format(file_path))\n sat_id = Satellite.from_params(sat_name, bool(sat_dir))\n except:\n # LOG: error in image metadata format\n logger.log(loglevel.ERROR, '\\tThere was an error in the metadata format of the image. Skipping.')\n continue\n\n aws_path = aws_path.format(haz_id, sat_id, img_type, img_date)\n full_path = full_path.format(img_date)\n mod_path = mod_path.format(img_date)\n\n # 1. Read in image file\n with rasterio.open(\"s3://{}\".format(file_path)) as data:\n band = data.read(1)\n img = plot.show(band)\n img.get_figure().savefig(full_path, dpi=300)\n\n # 3. Compress image\n compressed = immanip.compress_image(full_path, compression_amount=0.3)\n\n # 4 - 5. Pad image and add date on image\n text_image = immanip.add_text_to_image(compressed, img_date)\n\n # 6. Save image locally\n text_image.save(mod_path.format(img_date))\n mod_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, mod_path))\n full_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, full_path))\n\n tif_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, im.key))\n\n # LOG: images successfully moved to S3 bucket\n # LOG: mod_path_aws, full_path_aws, tif_path_aws\n\n hazard = Hazard(haz_id, haz_name, HazardType.VOLCANO, Location(center[0], center[1]), Date(img_date), 0)\n satellite = Satellite.from_params(sat_name, bool(sat_dir))\n image = Image(str(randint(1, 10000000)),\n haz_id,\n satellite,\n ImageType.from_string(img_type),\n Date(img_date),\n ImageURL(full_path_aws),\n ImageURL(tif_path_aws),\n ImageURL(mod_path_aws))\n\n try:\n db = Database()\n except ConnectionError:\n logger.log(loglevel.ERROR, \"\\tThere was an error while connecting to the database. Skipping this image.\")\n continue\n\n db.create_new_hazard(hazard)\n db.create_new_satellite(satellite)\n db.create_new_image(image)\n\n db.close()\n\n # LOG: database successfully updated\n logger.log(loglevel.INFO, \"\\tDatabase succesfully updated.\")\n\n save.save_image_s3(mod_path, \"{}/{}\".format(aws_path, mod_path))\n save.save_image_s3(full_path, \"{}/{}\".format(aws_path, full_path))\n save.move_tif(im.key, \"{}/{}\".format(aws_path, im.key))\n\n logger.log(loglevel.INFO, \"\\tImages were successfully uploaded to the S3 bucket\")\n logger.log(loglevel.INFO, \"\\t\\tmod_path_aws: {}\".format(mod_path_aws))\n logger.log(loglevel.INFO, \"\\t\\tfull_path_aws: {}\".format(full_path_aws))\n logger.log(loglevel.INFO, \"\\t\\ttif_path_aws: {}\".format(tif_path_aws))\n\n # LOG: image completed\n logger.log(loglevel.INFO, \"\\tProcessing of {} completed.\".format(im.key))\n\n # LOG: finished processing images\n logger.log(loglevel.INFO, \"Processing complete.\")",
"def product_stitch_sequential_metadata(\n input_meta_files: List[str],\n output_meta: Optional[str] = './tempMerged',\n output_format: Optional[str] = 'ENVI',\n verbose: Optional[bool] = False) -> None:\n\n # Create VRT and exit early if only one frame passed,\n # and therefore no stitching needed\n if len(input_meta_files) == 1:\n gdal.BuildVRT(output_meta+'.vrt', input_meta_files)\n return\n\n # Outputs\n output_meta = Path(output_meta).absolute()\n\n # Get raster attributes [SNWE, latlon_spacing, length, width. nodata]\n # from each input file\n\n # Initalize variables\n meta_attr_dicts = [] # metadata layers\n temp_snwe_list = []\n temp_latlon_spacing_list = []\n\n for meta_file in input_meta_files:\n meta_attr_dicts.append(get_GUNW_attr(meta_file))\n temp_snwe_list.append(meta_attr_dicts[-1]['SNWE'])\n temp_latlon_spacing_list.append([meta_attr_dicts[-1]['LAT_SPACING'],\n meta_attr_dicts[-1]['LON_SPACING']])\n\n # get sorted indices for frame bounds, from South to North\n # Sequential stitching starts from the most south frame and moves\n # forward to next one in the North direction\n # TODO: add option to reverse direction of stitching\n sorted_ix = np.argsort(np.array(temp_snwe_list)[:, 0], axis=0)\n\n # Loop through attributes\n snwe_list = [temp_snwe_list[ii] for ii in sorted_ix]\n latlon_spacing_list = [temp_latlon_spacing_list[ii] for ii in sorted_ix]\n\n # Loop through sorted frames, and stitch neighboring frames\n for i, (ix1, ix2) in enumerate(zip(sorted_ix[:-1], sorted_ix[1:])):\n if verbose:\n print('Frame-1:',\n meta_attr_dicts[ix1]['PATH'].split('\"')[1].split('/')[-1])\n print('Frame-2:',\n meta_attr_dicts[ix2]['PATH'].split('\"')[1].split('/')[-1])\n # Frame1\n frame1_meta_array = get_GUNW_array(meta_attr_dicts[ix1]['PATH'])\n # Frame2\n frame2_meta_array = get_GUNW_array(meta_attr_dicts[ix2]['PATH'])\n\n # Mask nodata values\n frame1_meta_array[frame1_meta_array ==\n meta_attr_dicts[ix1]['NODATA']] = np.nan\n frame2_meta_array[frame2_meta_array ==\n meta_attr_dicts[ix2]['NODATA']] = np.nan\n\n if i == 0:\n (corr_meta1, corr_meta2) = stitch_2frames_metadata(\n frame1_meta_array,\n meta_attr_dicts[ix1],\n frame2_meta_array,\n meta_attr_dicts[ix2],\n verbose=verbose)\n # Store corrected values\n corrected_meta_arrays = [corr_meta1, corr_meta2]\n else:\n (corr_meta1, corr_meta2) = stitch_2frames_metadata(\n corrected_meta_arrays[-1],\n meta_attr_dicts[ix1],\n frame2_meta_array,\n meta_attr_dicts[ix2],\n verbose=verbose)\n\n # Overwrite the last element in corrected arrays\n # TODO: check how to do this without using del\n del corrected_meta_arrays[-1]\n corrected_meta_arrays.extend([corr_meta1, corr_meta2])\n\n # Combine corrected unwrappedPhase arrays\n combined_meta, combined_snwe, _ = combine_data_to_single(\n corrected_meta_arrays, snwe_list,\n latlon_spacing_list, method='mean',\n latlon_step=[-0.1, 0.1])\n\n # replace nan with 0.0\n combined_meta = np.nan_to_num(combined_meta, nan=0.0)\n\n # Write\n # create temp files\n meta_out = output_meta.parent / (output_meta.name)\n # write stitched metadata product\n write_GUNW_array(meta_out, combined_meta, combined_snwe,\n format=output_format, verbose=verbose,\n add_vrt=True, nodata=0.0)",
"def concatenate_datasets(filenames_list, img_rows=128, img_cols=128):\n print('Concatenating the datasets created by data augmentation into a single one')\n print('Using the following pairs of images / masks datasets: ')\n print(filenames_list)\n print('\\n')\n\n # total number of images\n n_samples = 600 * len(filenames_list)\n\n # create np.ndarrays for the images and the targets: xCenter, yCenter, xOrientation, yOrientation\n images_dataset = np.ndarray((n_samples, img_rows, img_cols), dtype=np.uint8)\n targets_dataset = np.ndarray((n_samples, 4), dtype=np.float32)\n\n for ds, (img, mask) in enumerate(filenames_list):\n print(\" Processing {}\".format(img))\n images = np.load(\"output/augmented_data/{}.npy\".format(img))\n masks = np.load(\"output/augmented_data/{}.npy\".format(mask))\n\n for idx, mat in enumerate(masks):\n\n # get the center coordinates of the left ventricle (on the resized image)\n row, col = findCenter(img=mat, pixelvalue=1)\n\n # get the orientation of the left ventricle (on the resized image)\n x_v1, y_v1 = findMainOrientation(img=mat, pixelvalue=1)\n\n # save the center coordinates & orientation to the y dataframe (which will be the output of the network)\n targets_dataset[ds*600 + idx] = np.array([row, col, x_v1, y_v1])\n\n # save image in main dataset file\n images_dataset[ds*600 + idx] = images[idx]\n\n print('Concatenated all datasets into one & created target values for (center, orientation)')\n\n print('Splitting the dataset into 70% training & 30% testing')\n images_train, images_test, targets_train, targets_test = train_test_split(images_dataset, targets_dataset,\n test_size=0.3,\n random_state=42,\n shuffle=True)\n\n # save all ndarrays to a .npy files (for faster loading later)\n # Create directory to store files.\n directory = os.path.join(os.getcwd(), 'output/processed_data/')\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # save training set to file\n np.save('output/processed_data/images_train.npy', images_train)\n np.save('output/processed_data/targets_train.npy', targets_train)\n\n # save testing set to file\n np.save('output/processed_data/images_test.npy', images_test)\n np.save('output/processed_data/targets_test.npy', targets_test)\n print('Saving to .npy files done. See files: ')\n print('output/processed_data/images_train.npy')\n print('output/processed_data/targets_train.npy')\n print('output/processed_data/images_test.npy')\n print('output/processed_data/targets_test.npy')",
"def _get_headers_by_study(\n files: Set[Path], file_errors: DefaultDict[Path, List[str]]\n):\n study_key_type = Tuple[str, ...]\n studies: Dict[study_key_type, Dict[str, Any]] = {}\n indices: Dict[str, Dict[study_key_type, int]] = {}\n\n for file in files:\n if not file.is_file():\n continue\n with file.open(\"rb\") as f:\n try:\n # Read header only, skip reading the pixel data for now\n ds = pydicom.dcmread(f, stop_before_pixels=True)\n\n # Group by series instance uid or by stack ID (for 4D images)\n # Additionally also group by SOP class UID to skip over extra\n # raw data (dose reports for example) that are sometimes stored\n # under the same series instance UID.\n key: study_key_type = (\n ds.StudyInstanceUID,\n getattr(ds, \"StackID\", ds.SeriesInstanceUID),\n ds.SOPClassUID,\n )\n\n studies[key] = studies.get(key, {})\n indices[ds.StudyInstanceUID] = indices.get(\n ds.StudyInstanceUID, {}\n )\n\n try:\n index = indices[ds.StudyInstanceUID][key]\n except KeyError:\n index = len(indices[ds.StudyInstanceUID])\n indices[ds.StudyInstanceUID][key] = index\n\n headers = studies[key].get(\"headers\", [])\n headers.append({\"file\": file, \"data\": ds})\n studies[key][\"headers\"] = headers\n\n # Since we might need to combine multiple images with different\n # series instance UID (in 4D images), we cannot use the series\n # as the unique file name - instead, we use the study instance\n # uid and a counter (index) per study\n studies[key][\"name\"] = f\"{ds.StudyInstanceUID}-{index}\"\n\n except Exception as e:\n file_errors[file].append(format_error(str(e)))\n\n return studies",
"def get_images(algorithm=None):\n if algorithm == \"RMA\":\n #Ims = {}\n dates = []\n #Ims = np.load(\"Set_images_RMA.npy\").item()\n #dates = np.load(\"Dates_RMA.npy\")\n for i in range(n_im):\n i += i_o # Empieza en la posicion 10\n data = RMA.main(\"dset_\"+str(i)+\".hdf5\")\n #Ims[10+i] = data['Sf_n']\n dates.append(data['date'])\n np.save(os.getcwd()+\"/Results/Output_RMA/Im_\"+str(i)+\".npy\",data['Sf_n'])\n #np.save(\"Set_images_RMA\",Ims) # Para guardar el set de imagenes\n np.save(\"Parameters_RMA\",data)\n np.save(\"Dates_RMA\",np.array(dates))\n\n elif algorithm == \"BP\":\n #Ims = {}\n dates = []\n #Ims = np.load(\"Set_images_BP.npy\").item()\n #dates = np.load(\"Dates_BP.npy\")\n for i in range(n_im): #(4991):\n i += i_o # Empieza en la posicion 10\n data = BP.main(\"dset_\"+str(i)+\".hdf5\") \n #Ims[i] = data['Im']\n dates.append(data['date'])\n np.save(os.getcwd()+\"/Results/Output_BP/Im_\"+str(i)+\".npy\",data['Im']) # Imagenes de todo el dataset\n np.save(\"Parameters_BP\",data) # Parametros geometricos como dimensiones y grilla de la imagen\n np.save(\"Dates_BP\",np.array(dates)) # Fechas de las iamgenes tomadas de todo el dset\n\n return 'Ok'",
"def load_all_dicom_images(self, verbose=True):\n if verbose: print(\"Loading dicom files ... This may take a moment.\")\n\n path = self.get_path_to_dicom_files()\n fnames = [fname for fname in os.listdir(path)\n if fname.endswith('.dcm') and not fname.startswith(\".\")]\n images = []\n for fname in fnames:\n image = dicom.dcmread(os.path.join(path,fname))\n\n seid = str(image.SeriesInstanceUID).strip()\n stid = str(image.StudyInstanceUID).strip()\n\n if seid == self.series_instance_uid and\\\n stid == self.study_instance_uid:\n images.append(image)\n\n # ##############################################\n # Clean multiple z scans.\n #\n # Some scans contain multiple slices with the same `z` coordinate \n # from the `ImagePositionPatient` tag.\n # The arbitrary choice to take the slice with lesser \n # `InstanceNumber` tag is made.\n # This takes some work to accomplish...\n zs = [float(img.ImagePositionPatient[-1]) for img in images]\n inums = [float(img.InstanceNumber) for img in images]\n inds = list(range(len(zs)))\n while np.unique(zs).shape[0] != len(inds):\n for i in inds:\n for j in inds:\n if i!=j and zs[i] == zs[j]:\n k = i if inums[i] > inums[j] else j\n inds.pop(inds.index(k))\n\n # Prune the duplicates found in the loops above.\n zs = [zs[i] for i in range(len(zs)) if i in inds]\n images = [images[i] for i in range(len(images)) if i in inds]\n\n # Sort everything by (now unique) ImagePositionPatient z coordinate.\n sort_inds = np.argsort(zs)\n images = [images[s] for s in sort_inds]\n # End multiple z clean.\n # ##############################################\n\n return images",
"def combine(self, output_file=\"./cutout.fits\", memory_only=False):\n\n hdu_list = []\n\n for ext_hdus in self.input_hdulists:\n \n new_header = self.combine_headers([hdu.header for hdu in ext_hdus])\n \n new_img = self.combine_images([hdu.data for hdu in ext_hdus])\n hdu_list.append(fits.ImageHDU(data=new_img, header=new_header))\n\n if memory_only:\n return get_fits(hdu_list, center_coord=self.center_coord)\n else:\n get_fits(hdu_list, center_coord=self.center_coord, output_path=output_file)\n return output_file"
]
| [
"0.62815243",
"0.6218348",
"0.62160033",
"0.58797866",
"0.58561873",
"0.57963276",
"0.5749888",
"0.571183",
"0.5645936",
"0.56389517",
"0.55614287",
"0.552315",
"0.55147547",
"0.5478095",
"0.5472695",
"0.54527247",
"0.54388314",
"0.542696",
"0.5388265",
"0.5378135",
"0.5371858",
"0.53317696",
"0.53173023",
"0.53033864",
"0.52945054",
"0.5285747",
"0.5276557",
"0.52735305",
"0.5251549",
"0.52392095"
]
| 0.6486676 | 0 |
Bgrd = get_nc_BGrid_GFDL(grdfile) Load BGrid grid object for GFDL CM2.1 from netCDF grid file | def get_nc_BGrid_GFDL(grdfile, name='GFDL_CM2.1_North_Pacific', area='regional', \
xrange=(60,175), yrange=(120, 190), ystart=235):
nc = pyroms.io.Dataset(grdfile)
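    # horizontal coordinates at tracer (t) and corner/velocity (uv) points, ocean depth and Coriolis parameter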
lon_t = nc.variables['geolon_t'][:]
lat_t = nc.variables['geolat_t'][:]
lon_uv = nc.variables['geolon_c'][:]
lat_uv = nc.variables['geolat_c'][:]
h = nc.variables['ht'][:]
f = nc.variables['coriolis_param'][:]
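    # number of wet levels (kmt, kmu) and vertical coordinates/edges at t- and uv-points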
kmt = nc.variables['kmt'][:]
z_t = nc.variables['st_ocean'][:]
z_t_edges = nc.variables['st_edges_ocean'][:]
kmu = nc.variables['kmu'][:]
z_uv = nc.variables['sw_ocean'][:]
z_uv_edges = nc.variables['sw_edges_ocean'][:]
# compute mask at t-point
M_t, L_t = kmt.shape
N_t = z_t.shape[0]
mask_t = np.zeros((N_t, M_t, L_t))
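    # 3-D t-point mask: 1 over the kmt wet levels of each water column; a masked/invalid kmt (land) leaves the column at 0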
for j in range(M_t):
for i in range(L_t):
try:
mask_t[0:int(kmt[j,i]), j,i] = 1
except:
mask_t[:, j,i] = 0
# compute mask at uv-point
M_uv, L_uv = kmu.shape
N_uv = z_uv.shape[0]
mask_uv = np.zeros((N_uv, M_uv, L_uv))
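    # same construction for the uv-point mask, using kmu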
for j in range(M_uv):
for i in range(L_uv):
try:
mask_uv[0:int(kmu[j,i]), j,i] = 1
except:
mask_uv[:, j,i] = 0
if area == 'npolar':
        # extend the grid: two ghost rows at the southern and northern edges and one extra column on each side
lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]
lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]
lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])
lon_t[:,-1] = lon_t[:,-2] + (lon_t[:,-2]-lon_t[:,-3])
lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]
lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]
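        # fill the added rows: artificial latitudes of -85/-80 deg in the south and 90/91 deg at/beyond the pole in the north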
lat_t[0,:] = -85
lat_t[1,:] = -80
lat_t[-2,:] = 90
lat_t[-1,:] = 91
lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]
lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]
lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])
lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])
lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]
lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]
lat_uv[0,:] = -85
lat_uv[1,:] = -80
lat_uv[-2,:] = 90
lat_uv[-1,:] = 91
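        # extend the masks, depth and Coriolis parameter onto the enlarged grid; the added columns wrap around in longitude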
mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]
mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]
mask_t[:,:,0] = mask_t[:,:,-2]
mask_t[:,:,-1] = mask_t[:,:,1]
mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]
mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]
mask_uv[:,:,0] = mask_uv[:,:,-2]
mask_uv[:,:,-1] = mask_uv[:,:,1]
h = h[np.r_[0,0,:np.size(h,0),-1,-1]]
h = h[:,np.r_[0,:np.size(h,1),-1]]
h[:,0] = h[:,-2]
h[:,-1] = h[:,1]
f = f[np.r_[0,0,:np.size(f,0),-1,-1]]
f = f[:,np.r_[0,:np.size(f,1),-1]]
f[:,0] = f[:,-2]
f[:,-1] = f[:,1]
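        # index ranges of the useful region on the enlarged grid (the added ghost rows/columns are excluded)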
m,l = h.shape
xrange=(1,l-2)
yrange=(ystart+2,m-2)
if area == 'tripole':
        # extend the grid: two ghost rows in the south, two rows mirrored across the northern tripole fold, and one extra column on each side
fold1 = L_t//2
lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]
lon_t[-2,:fold1] = lon_t[-3,L_t:fold1-1:-1]
lon_t[-2,L_t:fold1-1:-1] = lon_t[-3,:fold1]
lon_t[-1,:fold1] = lon_t[-4,L_t:fold1-1:-1]
lon_t[-1,L_t:fold1-1:-1] = lon_t[-4,:fold1]
lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]
lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])
lon_t[:,-1] = lon_t[:,-2] + (lon_t[:,-2]-lon_t[:,-3])
lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]
lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]
lat_t[0,:] = -85
lat_t[1,:] = -80
lat_t[-2,:] = lat_t[-3,:]
lat_t[-1,:] = lat_t[-4,:]
lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]
lon_uv[-2,:fold1] = lon_uv[-4,L_t:fold1-1:-1]
lon_uv[-2,L_t:fold1-1:-1] = lon_uv[-4,:fold1]
lon_uv[-1,:fold1] = lon_uv[-5,L_t:fold1-1:-1]
lon_uv[-1,L_t:fold1-1:-1] = lon_uv[-5,:fold1]
lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]
lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])
lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])
lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]
lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]
lat_uv[0,:] = -85
lat_uv[1,:] = -80
lat_uv[-2,:] = lat_uv[-3,:]
lat_uv[-1,:] = lat_uv[-4,:]
mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]
mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]
mask_t[:,:,0] = mask_t[:,:,-2]
mask_t[:,:,-1] = mask_t[:,:,1]
mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]
mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]
mask_uv[:,:,0] = mask_uv[:,:,-2]
mask_uv[:,:,-1] = mask_uv[:,:,1]
h = h[np.r_[0,0,:np.size(h,0),-1,-1]]
h = h[:,np.r_[0,:np.size(h,1),-1]]
h[:,0] = h[:,-2]
h[:,-1] = h[:,1]
f = f[np.r_[0,0,:np.size(f,0),-1,-1]]
f = f[:,np.r_[0,:np.size(f,1),-1]]
f[:,0] = f[:,-2]
f[:,-1] = f[:,1]
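        # index ranges of the useful region on the enlarged tripole grid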
m,l = h.shape
xrange=(1,l-2)
yrange=(ystart+2,m-2)
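    # wrap everything into a BGrid_GFDL object restricted to the requested index ranges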
return BGrid_GFDL(lon_t, lat_t, lon_uv, lat_uv, \
mask_t, mask_uv, h, z_t, z_t_edges, \
z_uv, z_uv_edges, f, \
name, xrange=xrange, yrange=yrange) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ReadGrid(self, grdfile):\n nc = Dataset(grdfile,'r')\n \n self.xv = nc.variables['xv'][:]\n self.yv = nc.variables['yv'][:]\n self.xp = nc.variables['xp'][:]\n self.yp = nc.variables['yp'][:]\n self.xe = nc.variables['xe'][:]\n self.ye = nc.variables['ye'][:]\n self.dz = nc.variables['dz'][:] \n self.dv = nc.variables['dv'][:]\n self.Ac = nc.variables['Ac'][:]\n self.Nk = nc.variables['Nk'][:]\n self.face = nc.variables['face'][:]\n self.mark = nc.variables['mark'][:]\n\tself.cells = nc.variables['cells'][:]\n \n self.Nc = len(self.xv)\n self.Np = len(self.xp)\n self.Ne = len(self.xe)\n self.Nk = len(self.dz)\n self.numsides = self.face.shape[1]",
"def read_from_file(self,grd_fn):\n self.grd_fn = grd_fn\n self.fp = open(self.grd_fn,'rt')\n hdr = self.fp.readline().strip() #header &GRD_2008 or &LISTGRD\n\n if hdr == self.hdr_08:\n print( \"Will read 2008 format for grid\" )\n n_parms = 11\n elif hdr == self.hdr_old:\n print( \"Will read old UnTRIM grid format\" )\n n_parms = 10\n\n for i in range(n_parms): # ignore TNE and TNS in new format files\n l = self.fp.readline()\n lhs,rhs = l.split('=')\n val = rhs.strip().strip(',')\n varname = lhs.strip()\n print( \"%s=%s\"%(varname,val) )\n\n if varname=='NV':\n Nvertices = int(val)\n elif varname=='NE':\n Npolys = int(val)\n elif varname=='NS':\n Nsides = int(val)\n elif varname=='NBC':\n Nboundary_poly = int(val)\n elif varname=='NSI':\n Ninternal_sides = int(val)\n elif varname=='NSF':\n Nflow_sides = int(val)\n elif varname=='NBC':\n Nbc = int(val)\n elif varname=='ANGLE':\n self.angle = float(val)\n elif varname=='LOCATION':\n self.location = val\n elif varname=='NR': ## these are read, but not used\n Nred = int(val)\n elif varname=='TNE':\n TNE=int(val)\n elif varname=='TNS':\n TNS=int(val)\n # others: HLAND for older fmt.\n \n while 1:\n s = self.fp.readline().strip() # header: /\n if s == '/':\n break\n\n # We know the size of everything, and can ask UnstructuredGrid to allocate\n # arrays now, with the 'special' meaning that passing an integer means allocate\n # the array of that size, full of zeros.\n # this allocates\n # self.nodes, self.edges, self.cells\n self.from_simple_data(points = Nvertices,edges = Nsides, cells = Npolys)\n\n for v in range(Nvertices):\n Cv = self.fp.readline().split()\n if hdr == self.hdr_08:\n vertex_num = int(Cv.pop(0))\n if vertex_num != v+1:\n print( \"Mismatched vertex numbering: %d != %d\"%(vertex_num,v+1) )\n self.nodes['x'][v,0] = float(Cv[0])\n self.nodes['x'][v,1] = float(Cv[1])\n \n print( \"Npolys\",Npolys )\n self.cells['edges'] = self.UNKNOWN # initialize all\n self.cells['nodes'] = self.UNKNOWN\n \n for c in range(Npolys):\n l = self.fp.readline()\n Cp = l.split()\n if hdr == self.hdr_08:\n poly_num = int(Cp.pop(0))\n if poly_num-1 != c:\n print( \"Mismatched polygon id: %fd != %d\"%(poly_num,c+1) )\n \n numsides = int(Cp[0])\n\n self.cells['_center'][c,0] = float(Cp[1])\n self.cells['_center'][c,1] = float(Cp[2])\n\n if hdr == self.hdr_old:\n # vertex index is Cp[3,5,7,9]\n # the others, 4,6,8,10, are edges, right?\n # convert to 0 based indices here\n\n # This is probably wrong! 
I think it's actually reading the\n # sides\n self.cells['edges'][c,0] = int(Cp[4]) - 1\n self.cells['edges'][c,1] = int(Cp[6]) - 1 \n self.cells['edges'][c,2] = int(Cp[8]) - 1\n if numsides == 4:\n self.cells['edges'][c,3] = int(Cp[10]) - 1 \n else:\n self.cells['edges'][c,3]=self.UNDEFINED\n #HERE - need to copy that to self.cells['nodes']\n else:\n for ei in range(numsides):\n self.cells['nodes'][c,ei] = int(Cp[3+ei]) - 1\n self.cells['edges'][c,ei] = int(Cp[3+numsides+ei]) - 1\n self.cells['nodes'][c,numsides:]=self.UNDEFINED\n self.cells['edges'][c,numsides:]=self.UNDEFINED\n \n # choose some large, above-sea-level depth\n self.cells['depth_mean'] = -1000 # not sure this is doing anything...\n\n for e in range(Nsides):\n Cs = self.fp.readline().split()\n if hdr == self.hdr_08:\n # side num = int(Cs.pop(0))\n Cs.pop(0)\n elif hdr == self.hdr_old:\n # side depth?\n edge_depth = self.edges['depth_mean'][e] = float(Cs.pop(0))\n \n self.edges['nodes'][e,0] = int(Cs[0])-1 # vertex indices\n self.edges['nodes'][e,1] = int(Cs[1])-1\n \n self.edges['cells'][e,0] = int(Cs[2])-1 # cell neighbors\n self.edges['cells'][e,1] = int(Cs[3])-1\n\n if hdr == self.hdr_old:\n for nc in self.edges['cells'][e]:\n if nc >= 0 and edge_depth > self.cells['depth_mean'][nc]:\n self.cells['depth_mean'][nc] = edge_depth\n\n if hdr==self.hdr_old:\n # old format - have to infer cell nodes from edges\n self.make_cell_nodes_from_edge_nodes()\n\n # Try to make sense of the marks and red/black:\n self.cells['red'][:Nred] = True\n self.cells['mark'][:Nboundary_poly] = self.BOUNDARY\n self.edges['mark'][:Ninternal_sides] = 0\n self.edges['mark'][Ninternal_sides:Nflow_sides] = self.FLOW\n self.edges['mark'][Nflow_sides:] = self.LAND\n\n # Bathymetry:\n if hdr == self.hdr_08:\n # make a cheap tokenizer to read floats across lines\n # note that it's up to the user to know that all values from\n # the line are read, and not to get the iterator until you're\n # ready for some values to be read\n def tokenizer():\n while True:\n for item in self.fp.readline().split():\n yield item\n for c in range(Npolys):\n check_c,nis = [int(s) for s in self.fp.readline().split()]\n if check_c != c+1:\n print(\"ERROR: while reading cell subgrid, cell index mismatch: %s vs. %d\"%(c+1,check_c))\n \n next_token = tokenizer().next\n areas = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n \n self.cells['depth_mean'][c] = np.sum(areas*depths) / np.sum(areas)\n self.cells['_area'][c] = np.sum(areas)\n self.cells['depth_max'][c] = depths.max()\n self.cells['subgrid'][c] = (areas,depths)\n for e in range(Nflow_sides):\n l = self.fp.readline()\n # print \"%d/%d - Read line: %s\"%(e,self.Nsides,l)\n check_e,nis = [int(s) for s in l.split()]\n if check_e != e+1:\n print( \"ERROR: While reading edge subgrid, edge index mismatch: %s vs. %s\"%(e+1,check_e) )\n next_token = tokenizer().next\n lengths = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n if sum(lengths)<=0:\n print( \"edge %d has bad lengths\"%e )\n self.edges['depth_mean'][e] = np.sum(lengths*depths) / sum(lengths)\n self.edges['depth_max'][e] = depths.max()\n self.edges['subgrid'][e] = (lengths,depths)\n # and land boundaries get zeros.\n for e in range(Nflow_sides,Nsides):\n self.edges['depth_mean'][e] = 0.0\n self.edges['depth_max'][e] = 0.0\n self.edges['subgrid'][e] = ([],[])",
"def load_grd(filename):\n with open(filename, 'r') as f:\n meta = {}\n meta['header'] = []\n meta['header'].append(f.readline().rstrip('\\n'))\n while meta['header'][-1] != '++++':\n meta['header'].append(f.readline().rstrip('\\n'))\n # These determine the type of grid and the field format.\n meta['KTYPE'] = int(f.readline().split()[0])\n if meta['KTYPE'] != 1:\n raise ValueError(\"Not implemented.\")\n meta['NSET'], meta['ICOMP'], meta['NCOMP'], meta['IGRID'] = [int(s) for s in f.readline().split()]\n # The grid center in units of the x and y grid spacing.\n meta['IX'], meta['IY'] = [int(s) for s in f.readline().split()]\n # These are the x and y grid limits: S is lower, and E is upper.\n meta['XS'], meta['YS'], meta['XE'], meta['YE'] = [float(s) for s in f.readline().split()]\n # These are the numbers of grid points in x and y.\n meta['NX'], meta['NY'], meta['KLIMIT'] = [int(s) for s in f.readline().split()]\n # Implement this to read elliptically truncated grids.\n if meta['KLIMIT'] != 0:\n raise ValueError(\"Not implemented.\")\n # Load the field data. This returns an array with shape (NX * NY, 2 * NCOMP).\n conv = dict([(column, string_to_float) for column in range(2 * meta['NCOMP'])])\n data = np.loadtxt(f, dtype=float, converters=conv)\n # Determine the grid spacing and center values.\n meta['DX'] = (meta['XE'] - meta['XS']) / (meta['NX'] - 1)\n meta['DY'] = (meta['YE'] - meta['YS']) / (meta['NY'] - 1)\n meta['XCEN'] = meta['DX'] * meta['IX']\n meta['YCEN'] = meta['DY'] * meta['IY']\n # Reshape the data.\n map = np.empty((meta['NX'], meta['NY'], meta['NCOMP']),\n dtype=np.complex)\n for component in range(meta['NCOMP']):\n column = data[:, 2 * component] + 1j * data[:, 2 * component + 1]\n map[:, :, component] = column.reshape(meta['NX'], meta['NY'], order='F')\n return meta, map",
"def loadFromGrdecl(cls , filename):\n\n if os.path.isfile(filename):\n with open(filename) as f:\n specgrid = EclKW.read_grdecl(f, \"SPECGRID\", ecl_type=EclTypeEnum.ECL_INT_TYPE, strict=False)\n zcorn = EclKW.read_grdecl(f, \"ZCORN\")\n coord = EclKW.read_grdecl(f, \"COORD\")\n try:\n actnum = EclKW.read_grdecl(f, \"ACTNUM\", ecl_type=EclTypeEnum.ECL_INT_TYPE)\n except ValueError:\n actnum = None\n\n try:\n mapaxes = EclKW.read_grdecl(f, \"MAPAXES\")\n except ValueError:\n mapaxes = None\n\n return EclGrid.create( specgrid , zcorn , coord , actnum , mapaxes )\n else:\n raise IOError(\"No such file:%s\" % filename)",
"def loadFromFile(cls , filename):\n if FortIO.isFortranFile( filename ):\n return EclGrid( filename )\n else:\n return EclGrid.loadFromGrdecl( filename )",
"def read_grid(self, file_path=None):\n print('[info] reading the grid ...')\n if not file_path:\n file_path = os.path.join(self.directory, 'grid.dat')\n if not os.path.exists(file_path):\n file_path = os.path.join(self.directory, 'grid.txt')\n # test if file written in binary format\n textchars = bytearray({7, 8, 9, 10, 12, 13, 27}\n | set(range(0x20, 0x100)) - {0x7f})\n is_binary_string = lambda bytes: bool(bytes.translate(None, textchars))\n infile = open(file_path, 'rb')\n binary_format = is_binary_string(infile.read(1024))\n infile.close()\n if binary_format:\n with open(file_path, 'rb') as infile:\n # x-direction\n nx = struct.unpack('i', infile.read(4))[0]\n x = numpy.array(struct.unpack('d' * (nx + 1),\n infile.read(8 * (nx + 1))))\n # y-direction\n ny = struct.unpack('i', infile.read(4))[0]\n y = numpy.array(struct.unpack('d' * (ny + 1),\n infile.read(8 * (ny + 1))))\n self.grid = numpy.array([x, y])\n else:\n with open(file_path, 'r') as infile:\n n_cells = numpy.array([int(n)\n for n in infile.readline().strip().split()])\n coords = numpy.loadtxt(infile, dtype=numpy.float64)\n self.grid = numpy.array(numpy.split(coords,\n numpy.cumsum(n_cells[:-1] + 1)))\n if self.grid.size == 2:\n print('\\tgrid-size: {}x{}'.format(self.grid[0].size - 1,\n self.grid[1].size - 1))\n elif self.grid.size == 3:\n print('\\tgrid-size: {}x{}x{}'.format(self.grid[0].size - 1,\n self.grid[1].size - 1,\n self.grid[2].size - 1))",
"def read_grid2d(grid_file):\n labels = []\n with grid_file.open('r') as f:\n for row in f.readlines():\n labels.append([x.strip() for x in row.split('\\t')])\n\n labels = array(labels)\n grid2d = make_grid(labels.shape[0], labels.shape[1])\n grid2d['label'] = labels\n return grid2d",
"def get_grid(self, gribfile, using_pygrib_derived_coords=False):\n gr = pygrib.open(gribfile)\n\n g = gr[1]\n\n latdim = g.Nj\n londim = g.Ni\n\n if not using_pygrib_derived_coords:\n try:\n latFirst = g.latitudeOfFirstGridPointInDegrees\n lonFirst = g.longitudeOfFirstGridPointInDegrees\n latLast = g.latitudeOfLastGridPointInDegrees\n lonLast = g.longitudeOfLastGridPointInDegrees\n dy = g.jDirectionIncrementInDegrees\n dx = g.iDirectionIncrementInDegrees\n latPole = g.latitudeOfSouthernPoleInDegrees\n lonPole = g.longitudeOfSouthernPoleInDegrees\n\n lons, lats = np.meshgrid(np.linspace(\n lonFirst, lonLast, londim), np.linspace(latFirst, latLast, latdim))\n\n if not latPole==0 and not lonPole==0:\n log.info('Found rotated coordinates - converting to regular coordinates')\n lons, lats = regrot.rot_to_reg(lonPole,latPole,lons,lats)\n\n except RuntimeError:\n using_pygrib_derived_coords = True\n warnings.warn('Falling back to pygrib derived coordinates')\n lats, lons = g.latlons()\n using_pygrib_derived_coords=True\n if using_pygrib_derived_coords:\n lats, lons = g.latlons()\n\n data_date = g.dataDate\n data_time = g.dataTime\n\n starttime = dt.datetime.strptime(('%i-%.2i')%(data_date,data_time),'%Y%m%d-%H%M')\n\n gr.close()\n\n return lats.flatten(), lons.flatten(), latdim, londim",
"def _load_grdfile(casename=None):\n \n data={} \n\n if casename==None:\n print('_load_grdfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_grd.dat','r')\n except IOError:\n print('_load_grdfiles: invalid case name.')\n return data\n\n nodes_str=fp.readline().split('=')\n elements_str=fp.readline().split('=')\n nnodes=int(nodes_str[1])\n nele=int(elements_str[1])\n t_data1=np.genfromtxt(casename+'_grd.dat',skip_header=2, skip_footer=nnodes,dtype='int64')\n t_data2=np.genfromtxt(casename+'_grd.dat',skip_header=2+nele,dtype='float64')\n fp.close()\n\n data['nnodes']=nnodes\n data['nele']=nele\n data['nodexy']=t_data2[:,1:3]\n data['x']=t_data2[:,1]\n data['y']=t_data2[:,2]\n data['nv']=t_data1[:,1:4].astype(int)-1\n data['trigridxy'] = mplt.Triangulation(data['x'], data['y'],data['nv'])\n \n return data",
"def _load_grid(self):\n\n grid_metrics = ['nbe', 'ntsn', 'nbsn', 'ntve', 'nbve', 'art1', 'art2', 'a1u', 'a2u']\n grid_variables = ['lon', 'lat', 'x', 'y', 'lonc', 'latc', 'xc', 'yc',\n 'h', 'siglay', 'siglev']\n\n # Get the grid data.\n for grid in grid_variables:\n try:\n setattr(self.grid, grid, self.ds.variables[grid][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[grid].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[grid], attribute))\n setattr(self.atts, grid, attributes)\n except KeyError:\n # Make zeros for this missing variable so we can convert from the non-missing data below.\n if grid.endswith('c'):\n setattr(self.grid, grid, np.zeros(self.dims.nele).T)\n else:\n setattr(self.grid, grid, np.zeros(self.dims.node).T)\n except ValueError as value_error_message:\n warn('Variable {} has a problem with the data. Setting value as all zeros.'.format(grid))\n print(value_error_message)\n setattr(self.grid, grid, np.zeros(self.ds.variables[grid].shape))\n\n # Load the grid metrics data separately as we don't want to set a bunch of zeros for missing data.\n for metric in grid_metrics:\n if metric in self.ds.variables:\n setattr(self.grid, metric, self.ds.variables[metric][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[metric].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[metric], attribute))\n setattr(self.atts, metric, attributes)\n\n # Fix the indexing and shapes of the grid metrics variables. Only transpose and offset indexing for nbe.\n try:\n if metric == 'nbe':\n setattr(self.grid, metric, getattr(self.grid, metric).T - 1)\n else:\n setattr(self.grid, metric, getattr(self.grid, metric))\n except AttributeError:\n # We don't have this variable, so just pass by silently.\n pass\n\n try:\n self.grid.nv = self.ds.variables['nv'][:].astype(int) # force integers even though they should already be so\n self.grid.triangles = copy.copy(self.grid.nv.T - 1) # zero-indexed for python\n except KeyError:\n # If we don't have a triangulation, make one.\n triangulation = tri.Triangulation(self.grid.lon, self.grid.lat)\n self.grid.triangles = triangulation.triangles\n self.grid.nv = self.grid.triangles.T + 1\n\n # Fix broken triangulations if necessary.\n if self.grid.nv.min() != 1:\n if self._debug:\n print('Fixing broken triangulation. Current minimum for nv is {} and for triangles is {} but they '\n 'should be 1 and 0, respectively.'.format(self.grid.nv.min(), self.grid.triangles.min()))\n self.grid.nv = (self.ds.variables['nv'][:].astype(int) - self.ds.variables['nv'][:].astype(int).min()) + 1\n self.grid.triangles = copy.copy(self.grid.nv.T) - 1\n\n # If we've been given an element dimension to subsample in, fix the triangulation here. We should really do\n # this for the nodes too.\n if 'nele' in self._dims:\n if self._debug:\n print('Fix triangulation table as we have been asked for only specific elements.')\n print('Triangulation table minimum/maximum: {}/{}'.format(self.grid.nv[:, self._dims['nele']].min(),\n self.grid.nv[:, self._dims['nele']].max()))\n # Redo the triangulation here too.\n new_nv = copy.copy(self.grid.nv[:, self._dims['nele']])\n for i, new in enumerate(np.unique(new_nv)):\n new_nv[new_nv == new] = i\n self.grid.nv = new_nv + 1\n self.grid.triangles = new_nv.T\n\n # Update dimensions to match those we've been given, if any. 
Omit time here as we shouldn't be touching that\n # dimension for any variable in use in here.\n for dim in self._dims:\n if dim != 'time':\n setattr(self.dims, dim, len(self._dims[dim]))\n\n # Add compatibility for FVCOM3 (these variables are only specified on the element centres in FVCOM4+ output\n # files). Only create the element centred values if we have the same number of nodes as in the triangulation.\n # This does not occur if we've been asked to extract an incompatible set of nodes and elements, for whatever\n # reason (e.g. testing). We don't add attributes for the data if we've created it as doing so is a pain.\n for var in 'h_center', 'siglay_center', 'siglev_center':\n try:\n setattr(self.grid, var, self.ds.variables[var][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[var].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[var], attribute))\n setattr(self.atts, var, attributes)\n except KeyError:\n if self.grid.nv.max() == len(self.grid.x):\n try:\n setattr(self.grid, var, nodes2elems(getattr(self.grid, var.split('_')[0]), self.grid.triangles))\n except IndexError:\n # Maybe the array's the wrong way around. Flip it and try again.\n setattr(self.grid, var, nodes2elems(getattr(self.grid, var.split('_')[0]).T, self.grid.triangles))\n\n # Convert the given W/E/S/N coordinates into node and element IDs to subset.\n if self._bounding_box:\n self._dims['node'] = np.argwhere((self.grid.lon > self._dims['wesn'][0]) &\n (self.grid.lon < self._dims['wesn'][1]) &\n (self.grid.lat > self._dims['wesn'][2]) &\n (self.grid.lat < self._dims['wesn'][3])).flatten()\n self._dims['nele'] = np.argwhere((self.grid.lonc > self._dims['wesn'][0]) &\n (self.grid.lonc < self._dims['wesn'][1]) &\n (self.grid.latc > self._dims['wesn'][2]) &\n (self.grid.latc < self._dims['wesn'][3])).flatten()\n\n # If we've been given dimensions to subset in, do that now. Loading the data first and then subsetting\n # shouldn't be a problem from a memory perspective because if you don't have enough memory for the grid data,\n # you probably won't have enough for actually working with the outputs. 
Also update dimensions to match the\n # given dimensions.\n if 'node' in self._dims:\n self.dims.node = len(self._dims['node'])\n for var in 'x', 'y', 'lon', 'lat', 'h', 'siglay', 'siglev':\n try:\n node_index = self.ds.variables[var].dimensions.index('node')\n var_shape = [i for i in np.shape(self.ds.variables[var])]\n var_shape[node_index] = self.dims.node\n if 'siglay' in self._dims and 'siglay' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglay')] = self.dims.siglay\n elif 'siglev' in self._dims and 'siglev' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglev')] = self.dims.siglev\n _temp = np.empty(var_shape)\n if 'siglay' in self.ds.variables[var].dimensions:\n for ni, node in enumerate(self._dims['node']):\n if 'siglay' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglay'], node]\n else:\n _temp[..., ni] = self.ds.variables[var][:, node]\n elif 'siglev' in self.ds.variables[var].dimensions:\n for ni, node in enumerate(self._dims['node']):\n if 'siglev' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglev'], node]\n else:\n _temp[..., ni] = self.ds.variables[var][:, node]\n else:\n for ni, node in enumerate(self._dims['node']):\n _temp[..., ni] = self.ds.variables[var][..., node]\n except KeyError:\n if 'siglay' in var:\n _temp = np.empty((self.dims.siglay, self.dims.node))\n elif 'siglev' in var:\n _temp = np.empty((self.dims.siglev, self.dims.node))\n else:\n _temp = np.empty(self.dims.node)\n setattr(self.grid, var, _temp)\n if 'nele' in self._dims:\n self.dims.nele = len(self._dims['nele'])\n for var in 'xc', 'yc', 'lonc', 'latc', 'h_center', 'siglay_center', 'siglev_center':\n try:\n nele_index = self.ds.variables[var].dimensions.index('nele')\n var_shape = [i for i in np.shape(self.ds.variables[var])]\n var_shape[nele_index] = self.dims.nele\n if 'siglay' in self._dims and 'siglay' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglay')] = self.dims.siglay\n elif 'siglev' in self._dims and 'siglev' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglev')] = self.dims.siglev\n _temp = np.empty(var_shape)\n if 'siglay' in self.ds.variables[var].dimensions:\n for ni, nele in enumerate(self._dims['nele']):\n if 'siglay' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglay'], nele]\n else:\n _temp[..., ni] = self.ds.variables[var][:, nele]\n elif 'siglev' in self.ds.variables[var].dimensions:\n for ni, nele in enumerate(self._dims['nele']):\n if 'siglev' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglev'], nele]\n else:\n _temp[..., ni] = self.ds.variables[var][:, nele]\n else:\n for ni, nele in enumerate(self._dims['nele']):\n _temp[..., ni] = self.ds.variables[var][..., nele]\n except KeyError:\n # FVCOM3 files don't have h_center, siglay_center and siglev_center, so make var_shape manually.\n if var.startswith('siglev'):\n var_shape = [self.dims.siglev, self.dims.nele]\n elif var.startswith('siglay'):\n var_shape = [self.dims.siglay, self.dims.nele]\n else:\n var_shape = self.dims.nele\n _temp = np.zeros(var_shape)\n setattr(self.grid, var, _temp)\n\n # Check if we've been given vertical dimensions to subset in too, and if so, do that. 
Check we haven't\n # already done this if the 'node' and 'nele' sections above first.\n for var in 'siglay', 'siglev', 'siglay_center', 'siglev_center':\n short_dim = copy.copy(var)\n # Assume we need to subset this one unless 'node' or 'nele' are missing from self._dims. If they're in\n # self._dims, we've already subsetted in the 'node' and 'nele' sections above, so doing it again here\n # would fail.\n subset_variable = True\n if 'node' in self._dims or 'nele' in self._dims:\n subset_variable = False\n # Strip off the _center to match the dimension name.\n if short_dim.endswith('_center'):\n short_dim = short_dim.split('_')[0]\n if short_dim in self._dims:\n if short_dim in self.ds.variables[var].dimensions and subset_variable:\n _temp = getattr(self.grid, var)[self._dims[short_dim], ...]\n setattr(self.grid, var, _temp)\n\n # Check ranges and if zero assume we're missing that particular type, so convert from the other accordingly.\n self.grid.lon_range = np.ptp(self.grid.lon)\n self.grid.lat_range = np.ptp(self.grid.lat)\n self.grid.lonc_range = np.ptp(self.grid.lonc)\n self.grid.latc_range = np.ptp(self.grid.latc)\n self.grid.x_range = np.ptp(self.grid.x)\n self.grid.y_range = np.ptp(self.grid.y)\n self.grid.xc_range = np.ptp(self.grid.xc)\n self.grid.yc_range = np.ptp(self.grid.yc)\n\n # Only do the conversions when we have more than a single point since the relevant ranges will be zero with\n # only one position.\n if self.dims.node > 1:\n if self.grid.lon_range == 0 and self.grid.lat_range == 0:\n self.grid.lon, self.grid.lat = lonlat_from_utm(self.grid.x, self.grid.y, zone=self._zone)\n if self.grid.lon_range == 0 and self.grid.lat_range == 0:\n self.grid.x, self.grid.y, _ = utm_from_lonlat(self.grid.lon, self.grid.lat)\n if self.dims.nele > 1:\n if self.grid.lonc_range == 0 and self.grid.latc_range == 0:\n self.grid.lonc, self.grid.latc = lonlat_from_utm(self.grid.xc, self.grid.yc, zone=self._zone)\n if self.grid.lonc_range == 0 and self.grid.latc_range == 0:\n self.grid.xc, self.grid.yc, _ = utm_from_lonlat(self.grid.lonc, self.grid.latc)",
"def read_grid(filename_grid, dim=2, slc=None):\n ## get shape and slice\n fid = h5py.File(filename_grid, 'r')\n if dim==2:\n varnames = ['x', 'y', 'ep']\n if slc is None: slc = np.s_[0,:,:]\n if dim==3:\n varnames = ['x', 'y', 'z', 'ep']\n if slc is None: slc = np.s_[:,:,:]\n\n dset = fid.get(varnames[0])\n shape = dset[slc].shape\n (nx,ny,nz) = dset.shape\n ## read variables\n grid = {}\n for varname in varnames:\n try:\n dset = fid.get(varname)\n grid[varname] = np.zeros(shape)\n dset.read_direct(grid[varname], source_sel=slc)\n grid[varname] = grid[varname].transpose()\n except:\n pass\n fid.close()\n return grid, nx, ny, nz",
"def loadFrom2DUgrid(self, fileAndMeshName):\n LIB.mnt_grid_loadFrom2DUgrid.argtypes = [POINTER(c_void_p), c_char_p]\n fm = fileAndMeshName.encode('utf-8')\n ier = LIB.mnt_grid_loadFrom2DUgrid(self.obj, fm)\n if ier:\n error_handler(FILE, 'loadFrom2DUgrid', ier)",
"def load_target_grid(self):\n\n # load the target grid name (expected to be in the settings.txt file)\n self.grid_name = (self.st['directory_metadata'][0] +\n self.st[\"target_grid\"][0])\n\n if os.path.exists(self.grid_name):\n\n # open the metadata file\n self.file = netCDF4.Dataset(self.grid_name)\n\n # laod lat/lon\n self.lat = self.file.variables[\"latitude\"][:, :]\n self.lon = self.file.variables[\"longitude\"][:, :]\n\n try:\n\n # Atributos globais para serem lidos no thredds\n self.GRIDTYPE = getattr(self.file, \"GRIDTYPE\")\n self.MAP_PROJ = getattr(self.file, \"MAP_PROJ\")\n self.CEN_LON = getattr(self.file, \"CEN_LON\")\n self.MAP_PROJ_CHAR = getattr(self.file, \"MAP_PROJ_CHAR\")\n self.STAND_LON = getattr(self.file, \"STAND_LON\")\n self.TRUELAT1 = getattr(self.file, \"TRUELAT1\")\n self.TRUELAT2 = getattr(self.file, \"TRUELAT2\")\n self.CEN_LAT = getattr(self.file, \"CEN_LAT\")\n self.DX = getattr(self.file, \"DX\")\n self.DY = getattr(self.file, \"DY\")\n self.MOAD_CEN_LAT = getattr(self.file, \"MOAD_CEN_LAT\")\n\n except ValueError:\n pass\n\n # Close the file\n self.file.close()\n\n else:\n\n l1 = \"WARNING\"\n l2 = \"Target Grid: %s not found\" % self.grid_name\n l3 = \"Can't proceed\"\n l4 = \"Shutting down the program\"\n print(\"\")\n print(int(max([len(l1), len(l2), len(l3), len(l4)]) / 2 -\n len(l1) / 2) * \" \" + l1)\n print(l2)\n print(l3)\n print(l4)\n print(\"\")\n sys.exit()",
"def import_grid(file_name):\n\n return FileReader(file_name=file_name).grid",
"def __init__(self, path, grid_path=\"./\", grids=['T', 'U', 'V', 'W'],\n decode_times=True,\n\t chunks=None, autoclose=False):\n\t\tself.open_grid_files(grid_path)\n\t\tdef open_files(filenames):\n\t\t\tds = (xr.open_mfdataset(filenames,\n\t\t\t decode_times=decode_times,\n\t\t\t autoclose=autoclose,\n\t\t\t data_vars='minimal')\n .set_coords(['nav_lon', 'nav_lat'])\n\t\t\t )\n\t\t\tds = ds.chunk(chunks=chunks)\n\t\t\treturn ds\n\t\tif glob.glob(path + \"/*gridT*.nc\") and ('T' in grids):\n\t\t\tself.gridT = open_files(path + \"/*gridT*.nc\")\n\t\tif glob.glob(path + \"/*gridU*.nc\") and ('U' in grids):\n\t\t\tself.gridU = open_files(path + \"/*gridU*.nc\")\n\t\tif glob.glob(path + \"/*gridV*.nc\") and ('V' in grids):\n\t\t\tself.gridV = open_files(path + \"/*gridV*.nc\")\n\t\tif glob.glob(path + \"/*gridW*.nc\") and ('W' in grids):\n\t\t\tself.gridW = open_files(path + \"/*gridW*.nc\")\n\t\tif glob.glob(path + \"/*flxT*.nc\") and ('T' in grids):\n\t\t\tself.flxT = open_files(path + \"/*flxT*.nc\")",
"def read_field_from_grib_file(\n grib_file_name, field_name_grib1, model_name, grid_id=None,\n temporary_dir_name=None, wgrib_exe_name=grib_io.WGRIB_EXE_NAME_DEFAULT,\n wgrib2_exe_name=grib_io.WGRIB2_EXE_NAME_DEFAULT,\n raise_error_if_fails=True):\n\n num_grid_rows, num_grid_columns = nwp_model_utils.get_grid_dimensions(\n model_name=model_name, grid_name=grid_id)\n\n return grib_io.read_field_from_grib_file(\n grib_file_name=grib_file_name, field_name_grib1=field_name_grib1,\n num_grid_rows=num_grid_rows, num_grid_columns=num_grid_columns,\n sentinel_value=nwp_model_utils.SENTINEL_VALUE,\n temporary_dir_name=temporary_dir_name, wgrib_exe_name=wgrib_exe_name,\n wgrib2_exe_name=wgrib2_exe_name,\n raise_error_if_fails=raise_error_if_fails)",
"def load_surfer(fname, fmt='ascii'):\n assert fmt in ['ascii', 'binary'], \"Invalid grid format '%s'. Should be \\\n 'ascii' or 'binary'.\" % (fmt)\n if fmt == 'ascii':\n # Surfer ASCII grid structure\n # DSAA Surfer ASCII GRD ID\n # nCols nRows number of columns and rows\n # xMin xMax X min max\n # yMin yMax Y min max\n # zMin zMax Z min max\n # z11 z21 z31 ... List of Z values\n with open(fname) as ftext:\n # DSAA is a Surfer ASCII GRD ID\n id = ftext.readline()\n # Read the number of columns (ny) and rows (nx)\n ny, nx = [int(s) for s in ftext.readline().split()]\n shape = (nx, ny)\n # Read the min/max value of columns/longitue (y direction)\n ymin, ymax = [float(s) for s in ftext.readline().split()]\n # Read the min/max value of rows/latitude (x direction)\n xmin, xmax = [float(s) for s in ftext.readline().split()]\n area = (xmin, xmax, ymin, ymax)\n # Read the min/max value of grid values\n datamin, datamax = [float(s) for s in ftext.readline().split()]\n data = numpy.fromiter((float(i) for line in ftext for i in\n line.split()), dtype='f')\n data = numpy.ma.masked_greater_equal(data, 1.70141e+38)\n assert numpy.allclose(datamin, data.min()) \\\n and numpy.allclose(datamax, data.max()), \\\n \"Min and max values of grid don't match ones read from file.\" \\\n + \"Read: ({}, {}) Actual: ({}, {})\".format(\n datamin, datamax, data.min(), data.max())\n # Create x and y coordinate numpy arrays\n x, y = regular(area, shape)\n if fmt == 'binary':\n raise NotImplementedError(\n \"Binary file support is not implemented yet.\")\n return x, y, data, shape",
"def create_object(self, confM2R, grd_filename):\n ds = xr.open_dataset(grd_filename)\n\n if self.type == 'FORCINGDATA':\n\n logging.info(\"[M2R_grd] ---> Assuming {} grid type for {}\".format(confM2R.grd_type, self.type))\n logging.info(\"[M2R_grd] ---> Using dimension names {} and {} and {}\".format(confM2R.lon_name,\n confM2R.lat_name,\n confM2R.depth_name))\n\n self.lon = ds[str(confM2R.lon_name)][:]\n self.lat = ds[str(confM2R.lat_name)][:]\n self.h = ds[str(confM2R.depth_name)][:]\n self.nlevels = len(self.h)\n self.fillval = -9.99e+33\n self.hc = None\n\n if self.lon.ndim == 1:\n self.lon, self.lat = np.meshgrid(self.lon, self.lat)\n\n # Create grid for ESMF interpolation\n\n self.esmfgrid = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True, coord_names=[str(confM2R.lon_name), str(confM2R.lat_name)],\n add_mask=False)\n self.esmfgrid_u = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[str(confM2R.lon_name_u), str(confM2R.lat_name_u)],\n add_mask=False)\n self.esmfgrid_v = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[str(confM2R.lon_name_v), str(confM2R.lat_name_v)],\n add_mask=False)\n\n if confM2R.ocean_indata_type == 'SODA3':\n self.fillval = -1.e+20\n if confM2R.ocean_indata_type == 'SODA3_5DAY':\n self.fillval = -1.e+20\n if confM2R.ocean_indata_type == 'GLORYS':\n self.fillval = 9.96921e+36\n\n if confM2R.ocean_indata_type == 'NORESM':\n # self.h = ds[\"depth\"][:]\n self.h = np.asarray([0, 5, 10, 15, 20, 25, 30, 40, 50, 62.5, 75, 87.5, 100, 112.5, 125,\n 137.5, 150, 175, 200, 225, 250, 275, 300, 350, 400, 450, 500, 550, 600,\n 650, 700, 750, 800, 850, 900, 950, 1000, 1050, 1100, 1150, 1200, 1250,\n 1300, 1350, 1400, 1450, 1500, 1625, 1750, 1875, 2000, 2250, 2500, 2750,\n 3000, 3250, 3500, 3750, 4000, 4250, 4500, 4750, 5000, 5250, 5500, 5750,\n 6000, 6250, 6500, 6750])\n self.fillval = 32768\n self.nlevels = len(self.h)\n\n IOverticalGrid.get_z_levels(self)\n\n if self.type == 'STATION':\n self.lon = ds[confM2R.lon_name][:]\n self.lat = ds[confM2R.lat_name][:]\n self.h = ds[confM2R.depth_name][:]\n self.time = ds[confM2R.time_name][:]\n\n self.Lp = 1\n self.Mp = 1\n self.fillval = -9.99e+33\n\n if self.type in ['ROMS']:\n\n self.write_clim = True\n self.write_bry = True\n self.write_init = True\n self.write_stations = False\n\n self.lonname = 'lon_rho'\n self.latname = 'lat_rho'\n\n \"\"\"\n Set initTime to 1 if you dont want the first time-step to be\n the initial field (no ubar and vbar if time=0)\n \"\"\"\n\n self.inittime = 0\n self.ocean_time = 0\n self.NT = 2\n self.tracer = self.NT\n\n self.message = None # Used to store the date for printing to screen (IOwrite.py)\n self.time = 0\n self.reftime = 0\n self.grdtype = 'regular'\n self.mask_rho = ds[\"mask_rho\"][:, :]\n self.lon_rho = ds[\"lon_rho\"][:, :]\n self.lat_rho = ds[\"lat_rho\"][:, :]\n self.h = ds[\"h\"][:, :]\n\n masked_h = np.where(self.h > 0, self.h, self.h.max())\n\n self.hmin = masked_h.min()\n if \"Vtransform\" in ds.variables:\n self.vtransform = ds[\"Vtransform\"].values\n else:\n self.vtransform = confM2R.vtransform\n\n if \"s_rho\" in ds.variables:\n self.s_rho = ds[\"s_rho\"].values\n self.nlevels = len(self.s_rho)\n else:\n self.nlevels = confM2R.nlevels\n\n if \"Vstretching\" in ds.variables:\n self.vstretching = ds[\"Vstretching\"].values\n if \"theta_s\" in ds.variables:\n self.theta_s = ds[\"theta_s\"].values\n else:\n self.theta_s = confM2R.theta_s\n if 
\"theta_b\" in ds.variables:\n self.theta_b = ds[\"theta_b\"].values\n else:\n self.theta_b = confM2R.theta_b\n if \"Tcline\" in ds.variables:\n self.tcline = ds[\"Tcline\"].values\n else:\n self.tcline = confM2R.tcline\n if \"hc\" in ds.variables:\n self.hc = ds[\"hc\"].values\n else:\n self.hc = confM2R.hc\n\n if self.vtransform == 1:\n self.hc = min(self.hmin, self.tcline)\n self.hc = self.tcline\n if self.tcline > self.hmin:\n print('Vertical transformation parameters are not defined correctly in either gridid.txt '\n 'or in the history files: \\n Tc\\\n line = %d and hmin = %d. \\n You need to make sure that '\n 'tcline <= hmin when using transformation 1.' % (\n self.tcline, self.hmin))\n else:\n self.hc = self.tcline\n\n zeta = None\n if zeta is None:\n self.zeta = np.zeros(self.h.shape)\n else:\n self.zeta = zeta\n\n # for findvar in ds:\n # if findvar==\"hraw\":\n # self.hraw = ds[\"hraw\"][:,:,:]\n\n self.lon_u = ds[\"lon_u\"][:, :]\n self.lat_u = ds[\"lat_u\"][:, :]\n self.mask_u = ds[\"mask_u\"][:, :]\n for findvar in ds:\n if findvar == \"lon_vert\":\n self.lon_vert = ds[\"lon_vert\"][:, :]\n self.lat_vert = ds[\"lat_vert\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_rho\":\n self.x_rho = ds[\"x_rho\"][:, :]\n self.y_rho = ds[\"y_rho\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_u\":\n self.x_u = ds[\"x_u\"][:, :]\n self.y_u = ds[\"y_u\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_v\":\n self.x_v = ds[\"x_v\"][:, :]\n self.y_v = ds[\"y_v\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_psi\":\n self.x_psi = ds[\"x_psi\"][:, :]\n self.y_psi = ds[\"y_psi\"][:, :]\n\n for findvar in ds:\n if findvar == \"x_vert\":\n self.x_vert = ds[\"x_vert\"][:, :]\n self.y_vert = ds[\"y_vert\"][:, :]\n\n for findvar in ds:\n if findvar == \"xl\":\n self.xl = ds[\"xl\"]\n self.el = ds[\"el\"]\n\n for findvar in ds:\n if findvar == \"dmde\":\n self.dmde = ds[\"dmde\"][:, :]\n self.dndx = ds[\"dndx\"][:, :]\n\n self.lon_v = ds[\"lon_v\"][:, :]\n self.lat_v = ds[\"lat_v\"][:, :]\n self.mask_v = ds[\"mask_v\"][:, :]\n\n # self.spherical = ds[\"spherical\"][:]\n\n self.lon_psi = self.lon_u[:-1, :]\n self.lat_psi = self.lat_v[:, :-1]\n self.mask_psi = self.mask_v[:, :-1]\n\n # self.f = ds[\"f\"][:, :]\n self.angle = ds[\"angle\"][:, :]\n\n self.pm = ds[\"pm\"][:, :]\n self.invpm = 1.0 / np.asarray(ds[\"pm\"][:, :])\n self.pn = ds[\"pn\"][:, :]\n self.invpn = 1.0 / np.asarray(ds[\"pn\"][:, :])\n\n self.Lp = len(self.lat_rho[1, :])\n self.Mp = len(self.lat_rho[:, 1])\n\n self.fillval = -9.99e33\n\n self.eta_rho = self.Mp\n self.eta_u = self.Mp\n self.eta_v = self.Mp - 1\n self.eta_psi = self.Mp - 1\n self.xi_rho = self.Lp\n self.xi_u = self.Lp - 1\n self.xi_v = self.Lp\n self.xi_psi = self.Lp - 1\n\n # Boolean to check if we need to initialize the CLIM file before writing\n self.ioClimInitialized = False\n self.ioInitInitialized = False\n\n if self.lon_rho.ndim == 1:\n self.lon_rho, self.lat_rho = np.meshgrid(self.lon_rho, self.lat_rho)\n self.lon_u, self.lat_u = np.meshgrid(self.lon_u, self.lat_u)\n self.lon_v, self.lat_v = np.meshgrid(self.lon_v, self.lat_v)\n\n # Setup the vertical coordinate system\n IOverticalGrid.calculateVgrid(self)\n\n self.esmfgrid_u = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n coord_names=['lon_u', 'lat_u'],\n is_sphere=True,\n add_mask=False)\n self.esmfgrid_v = ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=['lon_v', 'lat_v'],\n add_mask=False)\n self.esmfgrid = 
ESMF.Grid(filename=grd_filename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True,\n coord_names=[self.lonname, self.latname],\n add_mask=False)",
"def read_BEC_region_mask(grid):\n region_mask_file = '/home/ivan/Python/data/BEC_REGION_MASK_%s.nc'%grid\n fpreg = Nio.open_file(region_mask_file, 'r')\n nreg = fpreg.dimensions['nreg']\n region_mask = fpreg.variables['REGION_MASK'][:]\n\n # get region long names\n region_lname = [''.join(fpreg.variables['REGION_lname'][n,:])\n for n in range(nreg)]\n # get region short names\n region_sname = [''.join(fpreg.variables['REGION_sname'][n,:])\n for n in range(nreg)]\n\n fpreg.close()\n return region_mask, nreg, region_lname, region_sname",
"def load(self, filename):\n LIB.mnt_grid_load.argtypes = [POINTER(c_void_p), c_char_p]\n fm = filename.encode('utf-8')\n ier = LIB.mnt_grid_load(self.obj, fm)\n if ier:\n error_handler(FILE, 'load', ier)",
"def get_grid_data(grid):\n indir = '/home/ivan/Tools/scrip/mapping/grids'\n infile = os.path.join(indir, grid + '.nc')\n fp = Nio.open_file(infile,'r')\n nlon, nlat = fp.variables['grid_dims'][:]\n tlat = fp.variables['grid_center_lat'][:]\n tlon = fp.variables['grid_center_lon'][:]\n fp.close()\n tlat = N.reshape(tlat,(nlat,nlon))[:,0]\n tlon = N.reshape(tlon,(nlat,nlon))[0,:]\n return nlon, nlat, tlon, tlat",
"def load_bb(filename):\n in_data = gdal.Open(filename, 0)\n geotransform = in_data.GetGeoTransform()\n nx = in_data.RasterXSize\n ny = in_data.RasterYSize\n return geotransform2bb(geotransform, nx, ny)",
"def read_ecog2d(ecog_file, grid_file):\n ecog = loadtxt(ecog_file, delimiter='\\t')\n\n ecog_on_grid = zeros(ecog.shape, dtype=DTYPE_ECOG)\n ecog_on_grid['value'] = ecog\n ecog_on_grid['good'] = ~isnan(ecog)\n ecog_on_grid['label'] = read_grid2d(grid_file)['label']\n\n return ecog_on_grid",
"def load_field(self,filename,unmask=True,timeslice=None,fieldname=None,\n check_for_grid_info=False,grid_info=None,grid_type='HD',\n **grid_kwargs):\n\n if not check_for_grid_info:\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n print(\"Reading input from {0}\".format(filename))\n with netCDF4.Dataset(filename,mode='r',format='NETCDF4') as dataset:\n if check_for_grid_info:\n latitudes = None\n longitudes = None\n for latitude_names in ['lat','y']:\n fields = dataset.get_variables_by_attributes(name=latitude_names)\n if len(fields) >= 1:\n break\n if len(fields) == 1:\n latitudes = fields[0][:]\n for longitude_names in ['lon','long','x']:\n fields = dataset.get_variables_by_attributes(name=longitude_names)\n if len(fields) >= 1:\n break\n if len(fields) == 1:\n longitudes = fields[0][:]\n elif len(fields) > 1:\n raise RuntimeError(\"File {0} contains\"\n \" multiple longitude fields\".format(filename))\n elif len(fields) > 1:\n raise RuntimeError(\"File {0} contains\"\n \" multiple latitude fields\".format(filename))\n if longitudes is not None:\n grid = gd.makeGrid('LatLong',nlat=len(latitudes),nlong=len(longitudes))\n grid.set_latitude_points(np.asarray(latitudes))\n grid.set_longitude_points(np.asarray(longitudes))\n grid_info.append(grid)\n else:\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n fields = None\n if fieldname is None:\n potential_field_names = ['Topo','topo','field_value','orog','z','ICEM',\n 'DEPTO','usurf','bats','slm','FDIR','lsmask',\n 'lake_field','river_flow',\n 'basin_catchment_numbers','rdirs','lsm',\n \"cumulative_flow\",\"catchments\",\n \"cumulative_flow_to_ocean\",\"acc\",\"catch\",\"rdir\"]\n else:\n potential_field_names = [fieldname]\n for potential_field_name in potential_field_names:\n fields = dataset.get_variables_by_attributes(name=potential_field_name)\n if len(fields) >= 1:\n break\n if len(fields) == 1:\n if timeslice is not None:\n field_slice = fields[0][timeslice,:,:]\n else:\n field_slice = fields[0][:]\n if grid_type==\"generic_1d\":\n if unmask:\n return np.asarray(field_slice)\n else:\n return np.asanyarray(field_slice)\n else:\n if unmask:\n return np.asarray(field_slice.reshape(grid.get_grid_dimensions()))\n else:\n return np.asanyarray(field_slice.reshape(grid.get_grid_dimensions()))\n elif len(fields) > 1:\n raise RuntimeError('File {0} contains multiple fields'.format(filename))\n else:\n raise RuntimeError('Field not found in file {0}'.format(filename))",
"def load_MegaGrid(filename):\n pickle_file = open(filename, 'rb')\n newMegaGrid = pickle.load(pickle_file)\n\n return newMegaGrid",
"def from_cdo_griddes(griddes):\n\n with open(griddes) as grid_file:\n grid_file_lines = grid_file.readlines()\n\n grid_dic = {}\n\n for line in grid_file_lines:\n words = line.split()\n if words[0] == '#':\n continue\n else:\n length = len(words)\n if length == 3:\n grid_dic[words[0]] = words[2]\n else:\n value_string = ' '.join(words[2:length-1])\n grid_dic[words[0]] = value_string\n\n if grid_dic['gridtype'] != 'lonlat':\n print(('Gridtype {0} not supported'.format(grid_dic['gridtype'])))\n return ''\n\n lon = np.zeros(int(grid_dic['xsize']))\n lat = np.zeros(int(grid_dic['ysize']))\n\n for i in range(len(lon)):\n lon[i] = float(grid_dic['xfirst']) + i * float(grid_dic['xinc'])\n for j in range(len(lat)):\n lat[j] = float(grid_dic['yfirst']) + j * float(grid_dic['yinc'])\n\n if grid_dic['xname'] == 'rlon':\n pol_lon = float(grid_dic['xnpole'])\n pol_lat = float(grid_dic['ynpole'])\n grid = RotGrid(lon, lat, pol_lon, pol_lat)\n else:\n grid = Grid(lon, lat)\n\n return grid",
"def load_field(self, filename,unmask=True,timeslice=None,fieldname=None,\n check_for_grid_info=False,grid_info=None,grid_type='HD',\n **grid_kwargs):\n\n print(\"Reading input from {0}\".format(filename))\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n return np.loadtxt(filename,np.float64).reshape(grid.get_grid_dimensions())",
"def load_rbc( fname, skiprows, nx, ny ):\n C = numpy.loadtxt( fname, skiprows=skiprows ) \n cell_frames = [ C[i].reshape(( nx,ny )) for i in range( 5000-skiprows ) ]\n return cell_frames",
"def get_from_gridfs(d, f):\n fs = gridfs.GridFS(d)\n b = fs.get(f).read()\n return b",
"def __init__(self, grdtype, confM2R):\n self.type = grdtype\n self.grdName = confM2R.outgrid_name\n self.realm = confM2R.realm\n\n logging.info(\"[M2R_grd] Creating init for grid object {}\".format(confM2R.outgrid_name))\n logging.info(\"[M2R_grd]---> Initialized GRD object for grid type {}\\n\".format(self.type))"
]
| [
"0.68723994",
"0.66056603",
"0.65547323",
"0.64402777",
"0.616281",
"0.61344266",
"0.5933896",
"0.59285045",
"0.59139436",
"0.58916956",
"0.587016",
"0.58692986",
"0.58584356",
"0.5805568",
"0.57237524",
"0.570327",
"0.56910247",
"0.5652674",
"0.56325096",
"0.56223077",
"0.56194425",
"0.56168926",
"0.55960816",
"0.555651",
"0.55483407",
"0.5540517",
"0.5531691",
"0.55014855",
"0.54863364",
"0.547267"
]
| 0.7247142 | 0 |
Method properTimeInput returns whether the user entered a valid time | def properTimeInput(time_):
if not time_.isdigit() or len(time_) > 4 or len(time_) < 4 or int(time_) > 2400 or int(time_) < 0 or int(time_[2])>5:
print("'",time_, "' is an invalid input for the time. Use 24 hr format.\nExamples: 8 a.m = 0800, 1 p.m = 1300, 2:30 = 1430, 12:50 a.m = 0050\n")
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def time_is_valid(request, day, time, name):\n\n\tif ((day != '0' and day != '6') and time.hour == 21) or time.minute != 0:\n\t\treturn False\n\n\t# George's time\n\tif name != \"George Yeh\" and day == '6' and time.hour >= 9 and time.hour < 12:\n\t\treturn False\n\n\treturn True",
"def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True",
"def valid_time(time):\n if time.hour < 0 or time.minute < 0 or time.second < 0:\n return False\n if time.minute >= 60 or time.second >= 60:\n return False\n return True",
"def validate_and_parse_input(time: str):\n if time is None or not re.match(r'^\\d{1,2}:\\d{1,2}$', time):\n return False\n hour, minute = map(int, time.split(r':'))\n if type(hour) != int or type(minute) != int:\n return False\n\n if 0 <= hour < 24 and 0 <= minute < 60:\n hour = hour % 12\n minute = minute\n return hour, minute\n else:\n return False",
"def is_valid_time(time):\n try:\n dateutil.parser.parse(time)\n return True\n except dateutil.parser.ParserError:\n return False",
"def test_check_args_submit_time(self):\n test_time = \"2021/06/18 11:00:00\"\n with self.assertRaises(TypeError) as context:\n self.duedate.check_args(test_time, self.test_turn_time)\n self.assertTrue(\"Invalid input format. 'submit_time' must be <datetime> format.\" in str(\n context.exception))",
"def _verify_time(given_time):\n\t\treturn int(given_time)",
"def is_time(time_string, time_format=''):\n if time_string is None:\n return False\n elif isinstance(time_string, datetime):\n return True\n\n try:\n parse_time(time_string, time_format)\n except ValueError:\n return False\n else:\n return True",
"def is_primary_time(time_string):\n return ':00' in time_string or ':30' in time_string",
"def check_time(startTime, endTime):\n\n now = datetime.now()\n startTimeObj = datetime.strptime(startTime, '%I:%M%p')\n endTimeObj = datetime.strptime(startTime, '%I:%M%p')\n\n if startTimeObj.hour <= now.hour <= endTimeObj.hour and \\\n startTimeObj.minute <= now.minute <= endTimeObj.minute:\n return True",
"def validate_duration_input(duration):\n if duration.isdigit():\n duration = int(duration)\n clear()\n return duration\n\n else:\n clear()\n print('** Please enter time spent on task '\n 'rounded to nearest whole minute **')\n return False",
"def is_time_in_given_format(time_string, time_format):\n try:\n datetime.strptime(time_string, time_format)\n return True\n except ValueError:\n return False",
"def _check_and_convert_time(time_input, assign_default_time=False):\n\n try:\n if isinstance(time_input, str): # input time_input as string\n if time_input.replace(\n \".\", \"\", 1\n ).isdigit(): # input time_input as numeric string\n time_input = (\n float(time_input)\n if \".\" in time_input\n else int(time_input) / 1000.0\n )\n else: # input time_input as datetime string\n time_input = dateutil.parser.parse(time_input).timestamp()\n elif isinstance(\n time_input, int\n ): # input time_input as epoch timestamps in milliseconds\n time_input = time_input / 1000.0\n elif isinstance(time_input, datetime):\n time_input = time_input.timestamp()\n\n datetime.fromtimestamp(time_input) # check current time_input is valid\n except Exception:\n if assign_default_time:\n logging.info(\n \"Cannot convert time_input into timestamps: {}\".format(time_input)\n )\n time_input = time.time()\n else:\n raise ValueError(\n \"Cannot convert time_input into timestamps: {}\".format(time_input)\n )\n\n return time_input",
"def test_time_must_be_valid(self):\n with self.assertRaises(Exception) as context:\n self.client.post(\n url_for('teams'),\n data={\n 'name': 'team',\n 'capacity': '11',\n 'number_players': '1',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01-01 at 13:00'\n }\n )\n self.assertTrue('Time must be a valid format' in context.exception)\n self.assertEqual(db.session.query(Team).count(), 0)",
"def enter_searching_time():\n valid_data = False\n # used to keep track of the values and change them in other scopes\n input_data = {'time': ''}\n\n while not valid_data:\n input_data['time'] = get_input(\"Enter the time spent your are interested in (min)\")\n if re.match('\\d+', input_data['time']):\n valid_data = True\n clean_scr()\n else:\n input(\"Enter valid minutes value.\")\n\n return input_data['time']",
"def isRestrictionTime(time_str):\n ValidTime.validateTime(time_str)\n time = datetime.strptime(time_str, \"%H:%M\").time()\n morning_ini, morning_fin = PicoPlaca.__getTimeRestriction(\"M\")\n if morning_ini <= time <= morning_fin:\n return True\n\n afternoon_ini, afternoon_fin = PicoPlaca.__getTimeRestriction(\"A\")\n if afternoon_ini <= time <= afternoon_fin:\n return True\n\n return False",
"def is_valid(self):\n if self.hour < 0 or self.minute < 0 or self.second < 0:\n return False\n if self.minute >= 60 or self.second >= 60:\n return False\n return True",
"def time_format(time):\n task_time = time\n formatted = True\n while formatted:\n try:\n int(task_time)\n formatted = False\n clear()\n except ValueError:\n clear()\n task_time = input(\"Please submit the time in rounded minutes: \\n>\")\n return task_time",
"def test_parse_time(\n test_input: str,\n expected: datetime.time,\n):\n assert tvmaze.parsers.parse_time(test_input) == expected",
"def valid_format(self):\n\n # If candidate is None, return true\n if not self.dt:\n print \"dt empty\"\n return True\n\n # Verify if time format is ok and stores in into a time-tuple format\n try:\n stime = datetime.strptime(self.dt, \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n return False\n else:\n return True",
"def is_time(self) -> bool:\n return self.times > 1",
"def validate_time(self) -> str:\n return pulumi.get(self, \"validate_time\")",
"def get_time(custom_text):\n fmt = '%H:%M:%S'\n while True:\n clear()\n print(\"Time Format: hours:minutes:seconds --:--:--\\n\")\n print(\"{}\\n\".format(custom_text))\n task_date = input(\"Please input a duration of time: \")\n try:\n datetime.datetime.strptime(task_date, fmt)\n except ValueError:\n print(\"'{}' doesn't seem to be a valid time.\".format(task_date))\n input(\"Press Enter\")\n except AttributeError:\n print(\"'{}' doesn't seem to be a valid time.\".format(task_date))\n input(\"Press Enter\")\n else:\n return datetime.datetime.strptime(task_date, fmt).time()\n break",
"def right_time(user):\n timezone = user.timezone\n start_time = float(user.night_start.replace(':', '.'))\n end_time = float(user.night_end.replace(':', '.'))\n\n #Get user's timezon's local time\n local_time = utils.local_from_timezone(timezone)\n return not (local_time >= start_time or local_time < end_time)",
"def get_time():\n clear_screen()\n while True:\n try:\n time = int(input(\"Add Time in Minutes: \"))\n break\n except ValueError:\n print(\"Please, Enter a Whole Number in Minutes!\")\n return time",
"def validtimefilter(self, hito):\n\t\tif self.horadesde == \"\" and self.horahasta == \"\":\n\t\t\treturn True\n\t\telse:\n\n\t\t\thora = hito.fechahora[hito.fechahora.index(\" / \")+3:]\n\n\t\t\thora_hito = datetime.datetime.strptime(hora, \"%H:%M\")\n\t\t\tif self.horadesde != \"\":\n\t\t\t\tif self.isPrimerHitoDelDia(hito):\n\t\t\t\t\thora_desde = datetime.datetime.strptime(self.horadesde, \"%H:%M\")\n\t\t\t\t\tif hora_desde > hora_hito:\n\t\t\t\t\t\treturn False\n\n\t\t\tif self.horahasta != \"\":\n\t\t\t\tif self.isUltimoHitoDelDia(hito):\n\t\t\t\t\thora_hasta = datetime.datetime.strptime(self.horahasta, \"%H:%M\")\n\t\t\t\t\t#print(\"%s --- %s = %s --- %s\" % (self.horahasta,str(hora_hasta),hora_hito, str(hora_hito)))\n\t\t\t\t\tif hora_hasta < hora_hito:\n\t\t\t\t\t\treturn False\n\n\t\t\treturn True",
"def check_time(self,data,data_orginal):\n if data['start_time'] > data['end_time']:\n raise ValidationError('event end time should be greater than start time.')",
"def is_complete_hour(text):\n for fmt in ['%H:%M:%S', '%H:%M']:\n try:\n strptime(text, fmt)\n return True \n except ValueError:\n pass\n return False",
"def _validate_time_fields(cls, item):\n if item.time_started_msec and (\n item.time_queued_msec > item.time_started_msec):\n cls._add_error(\n 'time queued check',\n 'Entity id %s: time queued %s is greater '\n 'than time started %s' % (\n item.id, item.time_queued_msec, item.time_started_msec))\n\n if item.time_finished_msec and (\n item.time_started_msec > item.time_finished_msec):\n cls._add_error(\n 'time started check',\n 'Entity id %s: time started %s is greater '\n 'than time finished %s' % (\n item.id, item.time_started_msec, item.time_finished_msec))\n\n current_time_msec = utils.get_current_time_in_millisecs()\n if item.time_finished_msec > current_time_msec:\n cls._add_error(\n 'time finished check',\n 'Entity id %s: time finished %s is greater '\n 'than the current time' % (\n item.id, item.time_finished_msec))",
"def _validate_time_params(time_params):\n allowed_params = (\"Ntimes\", \"start_time\", \"integration_time\", \"time_array\")\n if time_params.get(\"time_array\", None) is not None:\n return True\n elif all(time_params.get(param, None) is not None for param in allowed_params[:-1]):\n # Technically, start_time doesn't need to be specified, since it has a\n # default setting in io.py, but that might not be set in stone.\n return True\n else:\n return False"
]
| [
"0.78156906",
"0.76430243",
"0.76430243",
"0.74963695",
"0.727451",
"0.701177",
"0.689384",
"0.6890916",
"0.67932993",
"0.67757434",
"0.67485106",
"0.67383194",
"0.6708769",
"0.669873",
"0.6616662",
"0.65898037",
"0.6587257",
"0.65815306",
"0.6560355",
"0.6555645",
"0.6541537",
"0.65073895",
"0.6376683",
"0.63733286",
"0.6334494",
"0.6322004",
"0.6299936",
"0.62809515",
"0.6277435",
"0.6274125"
]
| 0.8539093 | 0 |
Method properDayInput returns whether the user entered a valid day | def properDayInput(day):
possibleStrings = ["m","mon","monday","tu","tue","tues","tuesday","w",
"we","wed","wednesday","th","tr","r", "thu","thur","thurs","thursday","f","fr",
"fri","friday","sa","sat","saturday","su","sun","sunday"]
validString = False
for i in range(0, len(possibleStrings)):
if possibleStrings[i] == day.lower():
validString = True
return validString | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_day():\n\twhile True:\n\t\t_day = input(\"Introduceti ziua: \")\n\t\ttry:\n\t\t\t_day = int(_day)\n\t\t\tif (not is_in_range(_day, 0, VALID_DAY)):\n\t\t\t\tprint(\"Ziua invalida.\")\n\t\t\telse:\n\t\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"Ziua invalida, introduceti un intreg.\")\n\treturn (_day)",
"def _valid_day(self, date_find):\n try:\n datetime.strptime(date_find, settings.TIME_FORMAT)\n valid = True\n except ValueError:\n valid = False\n return valid",
"def get_day():\n return handle_invalid_inputs(question_4, days)",
"def is_valid_day (val):\n if len(val) == 2 and count_digits(val) == 2:\n day = int(val)\n return day > 0 and day < 32\n return False",
"def enter_date():\n valid_data = False\n # used to keep track of the values and change them in other scopes\n input_data = {'date': ''}\n\n while not valid_data:\n input_data['date'] = get_input(\"Date of the task\" + \"\\n\" + \"Please use DD/MM/YYYY format: \")\n if re.match('\\d{2}/\\d{2}/\\d{4}', input_data['date']):\n try:\n datetime.datetime.strptime(input_data['date'], '%d/%m/%Y')\n except ValueError:\n clean_scr()\n get_input(\"Enter a valid date. Press enter to try again.\")\n else:\n valid_data = True\n clean_scr()\n\n return input_data['date']",
"def is_valid_date(date):\n\n try:\n parse(date)\n return date\n except:\n new_date = raw_input(\"Invalid date, try again: YYYY-MM-DD \")\n return is_valid_date(new_date)",
"def datetime_checkinput(year, month, day):\n try:\n datetime.datetime(year, month, day)\n except:\n raise Invaliddatetimeinput\n return 0",
"def get_day_of_week_from_user():\n while True:\n day = input('Select the month to explore. Enter from monday, tuesday, wednesday, thursday, friday, '\n 'saturday, sunday or all: ').lower()\n\n if day in VALID_DAYS:\n confirm = input(\"You have selected {}. Press 'y' to confirm: \".format(day.title()))\n\n if confirm == 'y':\n break\n else:\n print(\"Try again.\\n\")\n else:\n print(\"Invalid input: {}. Try again.\\n\".format(day))\n return day",
"def read_day_range(where):\n\twhile True:\n\t\tif (where == 'start'):\n\t\t\t_day = input(\"Introduceti ziua de inceput: \")\n\t\telif (where == 'end'):\n\t\t\t_day = input(\"Introduceti ziua de sfarsit: \")\n\t\telse:\n\t\t\traise NameError\n\t\ttry:\n\t\t\t_day = int(_day)\n\t\t\tif (not is_in_range(_day, 0, VALID_DAY)):\n\t\t\t\tprint(\"Ziua invalida.\")\t\n\t\t\telse:\n\t\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"Ziua invalida, introduceti un intreg.\")\n\treturn (_day)",
"def _validate(year, month, day):\n if day is not None and month is None:\n raise ValueError(\"Day without month\")\n if day is None:\n day = 1\n if month is None:\n month = 1\n if year is None:\n year = 2000\n # actual validation happens here\n datetime.date(year, month, day)",
"def DateInput(message):\n askAgainMessage = \"The date must be in the format DD/MM/YYYY\"\n keepAsking = True\n while keepAsking:\n answer = input(message)\n # First we check if there are two / by splitting using / and looking\n # for 3 items in the returned list.\n dateCheck = answer.split(sep=\"/\")\n if len(dateCheck) is not 3:\n print(askAgainMessage)\n else:\n # If all is order, we can assign the 3 items to day, month, year\n day = dateCheck[0]\n month = dateCheck[1]\n year = dateCheck[2]\n # Next we check each item has the right amount of characters\n # and they can all be converted into numbers.\n if (len(day) == 2 and len(month) == 2 and len(year) == 4 and\n CheckNumber(day) and CheckNumber(month) and\n CheckNumber(year)):\n day = int(day)\n month = int(month)\n year = int(year)\n if (day > 0 and day < 32 and month > 0 and month < 13 and\n year > 2000 and year < 3000):\n keepAsking = False\n else:\n print(askAgainMessage)\n else:\n print(askAgainMessage)\n return answer",
"def clean_date(self):\n input_day = self.cleaned_data.get('day')\n input_date = self.cleaned_data.get('date')\n if input_date < datetime.date.today():\n raise forms.ValidationError(\"Can not create a lesson in the past.\")\n elif input_date.strftime(\"%A\").lower() != input_day:\n raise forms.ValidationError(input_date.strftime(\"%d-%m-%Y\")+\" does not fall on a \"+input_day.title()+\".\")\n return input_date",
"def whatday(num):\n return days[num] if num in days.keys() else \"Wrong, please enter a number between 1 and 7\"",
"def get_day(month_name, num_days):\n display_month(month_name, num_days)\n day = input(\"Enter Day: \")\n try:\n day = int(day)\n if day > num_days or day < 1:\n os.system('cls')\n print(\"Accepted Values: 1-\" + str(num_days))\n return get_day(month_name, num_days)\n else:\n return day\n except ValueError:\n os.system('cls')\n print(\"Accepted Values: 1-\" + str(num_days))\n return get_day(month_name, num_days)",
"def week_init():\n week = input('Week to check: MM/DD/YYYY\\n')\n week = dtt.datetime.strptime(week,'%m/%d/%Y') #turns input to a datetime\n beforeday = input('Check days before date (Press enter to use today): MM/DD/YYYY\\n') or dtt.date.today()\n if (beforeday != dtt.date.today()):\n beforeday = dtt.datetime.strptime(beforeday,'%m/%d/%Y')\n return week, beforeday",
"def check_date(date, logger):\n logger.info('Checking the entered date...')\n try:\n (datetime.datetime.strptime(date, '%Y%m%d')).date()\n return True\n except Exception:\n raise SystemExit('Please, enter the date in \"YYYYMMDD\" format')",
"def test_date_entry_returns_correct_value_for_date(self):\n date_string = \"2018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n self.menu.OPTIONS['date format'] = date_format\n\n user_input = [date_string]\n\n with patch('builtins.input', side_effect=user_input):\n result = self.menu.date_entry()\n\n expected_result = (\n None,\n datetime.datetime.strptime(date_string,\n date_format['datetime format'])\n )\n\n self.assertEqual(result, expected_result)",
"def validate_input(date_string):\n #I decided to make sure the input was valid by checking each individual piece. I did this by splitting the input string by the dashes.\n #I checked first that the month value was between 1 and 12. I then checked depending on the month if the day value was valid.\n #I also made sure to check that the year was greater than 1000.\n #For February, I made a specific check for if it was a leap year or not. If the year inputted is not a leap year and the user entered\n #29 as the day value, it throws an error. Finally, once all values are checked and are valid, they are put into a tuple.\n splitdate = date_string.split(\"-\")\n if splitdate[0] != '' and splitdate[1] != '' and splitdate[2] != '':\n if int(splitdate[0]) >= 1 and int(splitdate[0]) <= 12:\n if int(splitdate[0]) == 1 or int(splitdate[0]) == 3 or int(splitdate[0]) == 5 or int(splitdate[0]) == 7 or int(splitdate[0]) == 8 or int(splitdate[0]) == 10 or int(splitdate[0]) == 12:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 31:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[0]) == 4 or int(splitdate[0]) == 6 or int(splitdate[0]) == 9 or int(splitdate[0]) == 11:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 30:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[0]) == 2:\n if int(splitdate[2]) % 4 == 0 or int(splitdate[2]) % 1000 == 0:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 29:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[1]) >= 1 and int(splitdate[1]) <= 28:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n return None",
"def test_check_args_weekend(self):\n test_date = dt.datetime(2021, 6, 20, 11, 0, 0)\n with self.assertRaises(ValueError) as context:\n self.duedate.check_args(test_date, self.test_turn_time)\n self.assertTrue(\n \"You can submit requests during weekdays only.\" in str(context.exception))",
"def condition(self, year, month, day, lastday, leapday):\n try:\n if len(day) == 0 or int(day) > int(lastday):\n if int(month) == 2 and day == leapday:\n Input.change_display(self, self.entries[4],\n 'Not a leap year')\n else:\n Input.change_display(self, self.entries[4],\n 'Enter day between 1-' + lastday)\n elif int(day) <= int(lastday):\n Input.change_display(self, self.entries[3], #Weekday message\n Output.message(self, year, month, day))\n except:\n Input.change_display(self, self.entries[4],\n 'Enter day between 1-' + lastday)",
"def test_validate_date_entry_returns_correct_ValueError(self):\n date_string = \"2018-21-01\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_text = \"{} is not valid in format {}\".format(\n date_string,\n date_format['UI format']\n )\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)",
"def valid_args(args):\n is_valid = True\n\n # valid date format?\n try:\n datetime.datetime(year=args.year, month=args.month, day=args.day)\n except Exception:\n traceback.print_exc()\n is_valid = False\n\n print(f\"Arguments: {args}\")\n return is_valid",
"def valid_date(input_date):\n try:\n input_dt = dt.datetime.strptime(input_date, \"%Y-%m-%d\")\n return input_date\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(input_date)\n raise argparse.ArgumentTypeError(msg)",
"def check_day(self, day_of_week):\n\n day_of_week -= 1\n if (day_of_week == -1):\n self.day_of_week = 6\n else:\n self.day_of_week = day_of_week",
"def check_date(message, param):\n while True:\n try:\n day, month, year = input(message).split(param)\n return str(datetime.datetime(int(year), int(month), int(day)).strftime(\"%d/%m/%Y\"))\n except ValueError:\n continue",
"def test_validate_date_entry_returns_correct_outOfBounds_if_future(self):\n date_string = \"3018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_text = \"dates in the future are not permitted\"\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)",
"def getBugsToday(myDay):\r\n #set bugs_today as neg one to accept zero as an input\r\n bugs_today = -1\r\n while bugs_today < 0 :\r\n myBugs_Validation = (input(u'Enter the number of bugs collected on day ' + str(myDay) + ' : '))\r\n #call my getValidation to check values entered\r\n bugs_today = getValidation(myBugs_Validation)\r\n #check if user entered a valid number\r\n if bugs_today == -1:\r\n print('\\nPlease enter the number of bugs collected. \\nEnter a whole integer number >= 0')\r\n \r\n return bugs_today",
"def chkDate(stdin):\n # return \"Y\" if dateCheck(stdin) else \"N\"\n return run(\"./chkdate\", [], stdin)[1].strip()",
"def isoweekday(self, *args, **kwargs): # real signature unknown\r\n pass",
"def ex8() :\r\n print(\" - Date Calculator - \")\r\n import datetime\r\n today = datetime.date.today()\r\n print(today)\r\n try : #try catch method, in case user enters non-date, or 31st Feb etc.\r\n userDate = input(\"Please enter the date to check in a dd/mm/yy format: \") #userDate is string\r\n userDate = datetime.datetime.strptime(userDate, '%d/%m/%Y').date() #userDate is date_object\r\n if userDate < today : print(\"Invalid input, date is in the past\")\r\n elif userDate == today: print(\"That's today you dum-dum, answer is 0 days.\")\r\n else:\r\n delta = userDate - today #calculate difference\r\n delta = str(delta) #date_object don't work with split only str\r\n delta = delta.split(\",\") #unorthodox method to delete time (0:00:0) from the days\r\n print(\"The number of days between today (\",today,\") and entered date (\",userDate,\") are \",delta[0],\".\")\r\n except ValueError as e :\r\n print(\"Not a valid date.\")"
]
| [
"0.72177476",
"0.7162018",
"0.70186716",
"0.6874332",
"0.68444574",
"0.6799649",
"0.67022496",
"0.6678752",
"0.6669538",
"0.6626575",
"0.6551671",
"0.6516856",
"0.6317545",
"0.6277343",
"0.6194926",
"0.61726516",
"0.61430144",
"0.6137453",
"0.61150825",
"0.61132497",
"0.6091419",
"0.60832566",
"0.6037802",
"0.60299104",
"0.59824604",
"0.59360135",
"0.5930289",
"0.5909613",
"0.58974326",
"0.58774513"
]
| 0.75861144 | 0 |
Method displayClassList displays the ongoing list of classes, numbered | def displayClassList(showExtraOption):
if len(classes) == 0:
print("\nThere are no classes\n")
delay()
return
print()
outstr = "{0:<12} {1:^18.5} {2:>5}"
print(outstr.format(" Class","Day","Time"))
for i in range(0, len(classes), 1):
print(str(i+1) + "." + " " + str(classes[i]))
if(showExtraOption): #If True is passed to the function, the option for "None" will appear
print(str(len(classes) + 1) + "." + " None")
else:
print() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_classes():\n for obj in Classes.get_all_obj_list():\n print('\\033[33;1m[%s] [%s]校区 [%s]班级 学费[%s]\\033[0m'.center(60, '-') \\\n % (obj.school_nid.get_obj_by_uuid().name, obj.school_nid.get_obj_by_uuid().addr, \\\n obj.name, obj.tuition))",
"def show_class(self,c):\n sorted_data = sorted((self.prob(tpl,self.class_counts,self.feature_counts)[c],\n ind, # preserve ordering for equal probabilities\n tpl)\n for (ind,tpl) in enumerate(self.dataset.train))\n for cc,r,tpl in sorted_data:\n print(cc,*tpl,sep='\\t')",
"def display_label(f_class, catalog): \n # Transform the top n class indexes into class labels LIST.\n return catalog[str(f_class)]",
"def display(self):\r\n os.system('cls')\r\n index = 0\r\n for i in self.list:\r\n print(str(index) + \" \" + i.showRule())\r\n index += 1",
"def process_class_list(self, module, classes):",
"def print_job_classes_info(self, class_list, show_jobs_flag=False):\n\n job_classes_dict = {}\n for job in self.jobs:\n\n classes_job_belongs_to = job.get_class_name(class_list)\n\n # print job-class info only if\n if show_jobs_flag:\n print(\"job name: {}\".format(job.label))\n print(\"-----> belongs to classes: {}\".format(classes_job_belongs_to))\n\n for job_class_name in classes_job_belongs_to:\n job_classes_dict.setdefault(job_class_name, []).append(job)\n\n print(\"============ SIM: {} ===============\".format(self.name))\n\n total_jobs_in_classes = 0\n for k,v in job_classes_dict.items():\n print(\"CLASS: {}, contains {} jobs\".format(k, len(v)))\n total_jobs_in_classes += len(v)\n\n print(\"total n jobs {}\".format(len(self.jobs)))\n print(\"total n in classes {}\".format(total_jobs_in_classes))",
"def print_list(self):\r\n pass",
"def set_class_list(self, L):\n\t\tself.class_list = L",
"def print_results(classes, output_file): \n for DataClass in classes:\n DataClass.datalock.acquire()\n output_file.write(\"--- \" + DataClass.__name__ + \" ---\\n\")\n for data in DataClass.data:\n output_file.write(data + repr(DataClass.data[data]) + '\\n')\n output_file.write('\\n')\n DataClass.datalock.release()\n return",
"def displaySorted(self):\r\n os.system('cls')\r\n for i in self.sortedList:\r\n print(str(i[2]) + \": \" + i[0].showRule())",
"def summarize_classes(classes):\n u, indices = np.unique(classes,return_inverse=True)\n num_u=len(u)\n print(\"****************************\")\n print(\"Number of samples: {0}\".format(len(classes)))\n print(\"Number of Classes:{0}\".format(num_u))\n for c in u:\n num_c=np.sum(classes==c)\n print(\"Class {0}: {1} Samples\".format(c,num_c))\n print(\"****************************\")",
"def display(self):\r\n\t\tfor each_item in self.items:\r\n\t\t\teach_item.display()",
"def list_classes(filename, output_file):\n file_pointer = open(filename)\n file_split = filename.replace(\"/\",\".\")\n file_split = file_split.split(\".\")\n\n class_re = re.compile(\"^class ([A-Za-z]+[^\\(:]*)\")\n method_re = re.compile(\"^ def ([a-z][a-z_]*)\")\n # remove_self_re = re.compile(r\"self(, )?\")\n first = True\n\n for line in file_pointer:\n\n class_names = class_re.findall(line)\n if len(class_names) > 0:\n if first:\n first = False\n output_file.write(\"Classes\\n\")\n output_file.write(\"^^^^^^^\\n\")\n output_file.write(\"- \")\n module = file_split[4]\n class_name = class_names[0]\n output_file.write(f\":class:`~arcade.{module}.{class_name}`\")\n output_file.write(\"\\n\")\n\n method_names = method_re.findall(line)\n for method_name in method_names:\n # method_name = name[2]\n output_file.write(f\" - :func:`~arcade.{module}.{class_name}.{method_name}`\\n\")\n # name = remove_self_re.sub(\"\", name)\n\n if not first:\n output_file.write(\"\\n\")",
"def count_class(srcfile, listfile):\n cls_list = []\n\n # open the list file\n with open(listfile, 'r') as f:\n lines = f.readlines()\n\n # check each file in the list\n for line in lines:\n xml_file = srcfile.format(line.strip())\n\n tree = ET.parse(xml_file)\n\n # objs is all the objects in the xml\n objs = tree.findall('object')\n\n # find the class name in the object, and add it to the cls list\n for ix, obj in enumerate(objs):\n cls = str(obj.find('name').text)\n cls_list.append(cls)\n\n # find the keys and sort, count the number of boxes of the keys\n if len(cls_list) > 0:\n cls_list.sort()\n import numpy as np\n cls_arr = np.array(cls_list)\n cls1 = list(set(cls_list))\n print('unsort classes is:', cls1)\n cls1.sort()\n print('sorted classes is:', cls1)\n classes = np.unique(cls_arr)\n print('the class number is:', classes.shape[0])\n print('----------------------------')\n print('the number of each class:')\n for i in range(0, classes.shape[0]):\n # print(classes[i], cls_list.count(classes[i]))\n print(classes[i], ':', np.where(cls_arr==classes[i])[0].shape[0])\n print('----------------------------')\n\n print('the number of all the boxes is:', len(cls_list))\n return cls_list",
"def displayThreads(self):\n print('{:18} {:20} {}'.format('THREAD NAME','INFO','IS ALIVE'))\n for key in sorted(list(self.threadlist.keys())):\n print('{!s:18}: {!s:20} {}'.format(key,\n self.threadlist[key], self.threadlist[key].isAlive()))",
"def print_process_list(self) -> None:\n\n print(f\"Process List: {self.process_list}\")",
"def printListOfCalibTypes (self) :\n print '\\nprintListOfCalibTypes(): list_of_clib_types:' #, self.list_of_clib_types\n for type in self.list_of_clib_types : print ' ', type",
"async def top_10_class(self):\r\n players = await self.get_players()\r\n classes = []\r\n # Add all players\r\n for player in players:\r\n classes.append(int(player['classId']))\r\n del classes[10:]\r\n await self.bot.send_message('Top 10 3v3 Composition:')\r\n for xvar in range(1, 13):\r\n if players.count(xvar) > 0:\r\n await self.bot.send_message('{:s}: {:d}'.format(self.classes[xvar - 1], classes.count(xvar)))",
"def update_classes(class_list_, champions_list_, class_counters_):\n logging.debug(\"Function update_classes() called\")\n\n class_counters_value_list = [0] * len(class_list_)\n for i, class_ in enumerate(class_list_): # looping over counters for every class\n logging.info(\"Current class: %s\", class_)\n for (\n champ\n ) in (\n champions_list_\n ): # for loop to assign how much champions are nonzero in class\n if champ.ChampCounter.get() >= 1:\n logging.info(\"Current champ with counter >=1: %s\", champ.name)\n if class_ in (champ.class_prim, champ.class_sec):\n logging.info(\n \"Current champ with counter >=1 match class Prim or Sec \\\n : %s or %s\",\n champ.class_prim,\n champ.class_sec,\n )\n class_counters_value_list[i] = class_counters_value_list[i] + 1\n logging.info(\n \"Number of nonzero champions in this class = %s\",\n class_counters_value_list[i],\n )\n class_counters_[i].set(class_counters_value_list[i])\n\n logging.debug(\"Function update_classes() end\")",
"def do_show(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n print(dict_objs[key])\n else:\n print(\"** no instance found **\")",
"async def top_100_class(self):\r\n players = self.get_players()\r\n classes = []\r\n for player in players:\r\n classes.append(player['classId'])\r\n del classes[100:]\r\n await self.bot.send_message('Top 100 3v3 Composition:')\r\n for xvar in range(1, 13):\r\n if classes.count(xvar) > 0:\r\n await self.bot.send_message('{:s}: {:d}'.format(self.classes[xvar - 1], classes.count(xvar)))",
"def class_page(request):\r\n class_extensions = [] # Empty list used to store the class extensions\r\n book = Books() # Books object initialization\r\n user = request.user # Get the currently authenticated user\r\n no_books = True # Flag value, assuming no books exist for a user\r\n\r\n class1 = user.class_schedule.class1\r\n class2 = user.class_schedule.class2\r\n class3 = user.class_schedule.class3\r\n class4 = user.class_schedule.class4\r\n class5 = user.class_schedule.class5\r\n class6 = user.class_schedule.class6\r\n classes = Classes_list()\r\n display_classes = classes.display_classes(class1, class2, class3, class4, class5, class6)\r\n\r\n # Check to see if books exist yet for a user\r\n for book in Books.objects.filter(user_id__pk=user.id)[:1]:\r\n no_books = False\r\n\r\n # Regenerate a new schedule based on the users major\r\n if request.method == 'POST':\r\n new_users_schedule = Class_schedule(user.id)\r\n new_users_schedule.create_class(user.major)\r\n new_users_schedule.save() \r\n return HttpResponseRedirect(reverse('ez_main:class_page'))\r\n\r\n for obj in display_classes:\r\n class_extensions.append(obj.class_extension)\r\n\r\n # If no books exist for the user, get the users books based on classes\r\n if no_books:\r\n print(\"made the books!\")\r\n book = book.find_books(class_extensions, user)\r\n\r\n return render(request, 'ez_main/class_page.html', {'display_classes': display_classes})",
"def classes(self):\n return str(self._classes)",
"def do_show(self, args):\n temp = args.split()\n\n if len(temp) == 0:\n print(\"** class name missing **\")\n return\n elif temp[0] not in self.myclasses:\n print(\"** class doesn't exist **\")\n return\n elif len(temp) < 2:\n print('** instance id missing **')\n return\n else:\n all_objs = storage.all()\n for i in all_objs.keys():\n if i == \"{}.{}\".format(temp[0], temp[1]):\n print(all_objs[i])\n return\n print('** no instance found **')",
"def show(self):\n i = 0\n print()\n for task in self.tasks:\n print(\"\\t\", i + 1, \". \", task.name, \"(\", task.priority, \")\")\n i += 1",
"def report(self):\n print()\n print(\"%-15s %-25s %s\" % (\"Class\", \"Name\", \"File\"))\n print(\"%-15s %-25s %s\" % (\"-----\", \"----\", \"----\"))\n for m in sorted(self.flatten(), key=lambda n: n.identifier):\n print(\"%-15s %-25s %s\" % (type(m).__name__, m.identifier, m.filename or \"\"))",
"def dir_classify_n(lsorted, class_instance, class_dict,Lwrite=1):\n #print(classobj_dict_key)\n luse=[]\n ukeys=[] # used keys\n lsuff=['py','sh','csh','pl']\n for f in class_dict.__dict__.keys(): # as for keys == py_fname(.py)\n Ltag = False\n for suf in lsuff:\n fsuf = f + '.' + suf\n if fsuf in lsorted: # scan for keys\n luse.append(fsuf)\n lsorted.remove(fsuf) # remove key.py\n ukeys.append(f) # return key\n Ltag = True\n continue\n if Ltag == True:\n continue\n\n #print(f\" in dir_classify(): {f}\") # to print all the not-selected files\n ### classify modules used\n if luse:\n CLASS_instance = class_instance.upper()\n print(\" {:<10}::\".format(CLASS_instance))\n if Lwrite:\n for f in luse:\n print(f\" {f} \")\n return ukeys",
"def display_all(self) -> None:\n self.display.draw_list(self.read_all_statuses())",
"def draw_num_classes_graphs():\n values = [10, 50, 100, 250, 1000, 4000]\n for num_classes in values:\n print(\"Training model on {} most common classes.\".format(num_classes))\n model = create_pretrained_model(num_classes=num_classes)\n histories = train(model, num_classes, epochs=50)\n run_name = get_run_name(\"{}classes\".format(num_classes))\n save_learning_curves(histories, run_name)\n csv_path = os.path.join(\"plots/\", run_name, \"data.csv\")\n ut.write_csv_dict(histories,\n keys=['loss', 'acc', 'val_loss', 'val_acc'],\n filename=csv_path)",
"def export_cropobject_class_list(cropobject_classes):\n # type: (List[CropObjectClass]) -> str\n cropobject_classes_string = '\\n'.join([str(c) for c in cropobject_classes])\n\n lines = list()\n\n lines.append('<?xml version=\"1.0\" encoding=\"utf-8\"?>')\n lines.append('<CropObjectClassList'\n ' noNamespaceSchema=\"mff-muscima-cropobject-classes.xsd\"'\n ' xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"'\n ' xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\">')\n lines.append('<CropObjectClasses>')\n lines.append(cropobject_classes_string)\n lines.append('</CropObjectClasses>')\n lines.append('</CropObjectClassList>')\n return '\\n'.join(lines)"
]
| [
"0.71469915",
"0.6526368",
"0.64634573",
"0.6399989",
"0.62757725",
"0.6181238",
"0.5977218",
"0.5869728",
"0.578967",
"0.57455575",
"0.56798834",
"0.5679596",
"0.5643068",
"0.5642376",
"0.55955905",
"0.55777174",
"0.5554064",
"0.54997116",
"0.54984796",
"0.5498105",
"0.54953724",
"0.5482987",
"0.54808354",
"0.54762274",
"0.54671353",
"0.54536766",
"0.54283965",
"0.541761",
"0.54174286",
"0.54121953"
]
| 0.6831478 | 1 |
Method add class takes user inputs and adds a class to the ongoing list of classes, sorted | def addClass():
print("\nEnter classes by day. For example enter all your Monday classes first, then Tuesday, etc.")
print("When asked to put in class meeting times enter in 24 hr format. Example: 1:00 p.m = 1300 8:00 a.m = 0800")
day = input("Day of Class: ")
while not properDayInput(day): #While format is not correct, persist on getting the correct entry
print("Please enter a day of the week")
day = input("Day of Class: ")
className = input("Name of Class: ").strip()
if className == "": #If user does not put in a field (or just a bunch of spaces)
className = "EMPTY ENTRY!"
startTime = input("Starting Time: ")
while not properTimeInput(startTime):
startTime = input("StartingTime: ")
endTime = input("Ending Time: ")
while not properTimeInput(endTime):
endTime = input("Ending Time: ")
class_ = Class(className, Day(day), startTime, endTime) #Creating class object from user's entries
for i in range (0, len(classes),1): #Checking for overlaping/duplicate classes
classInList = classes[i]
if(class_ == classInList):
print("\nThere is a scheduling conflict with class: " + str(classInList) + " and " + str(class_))
print("The class you just entered was not added to schedule. Please try another entry or edit an existing class\n")
return #Break out of function
classes.append(Class(className.upper(), Day(day), startTime, endTime))
print("\nClass added to schedule")
classes.sort()
delay()
clearTerminal() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_class(self, new_class):\n index = self._counter\n self._counter += 1\n for element in new_class:\n self._class_names[element] = index\n node = self.part[index].append(element)\n self._place[element] = node",
"def process_class_list(self, module, classes):",
"def insert_class(self, user_label: str) -> None:\n logger.debug(\"Adds \\\"{}\\\" to the user label class\", user_label)\n\n if self.cluster_k == -1:\n logger.debug(\"Clustering is disabled. Hence, just added to the list in set-semantic (current length: {})\",\n len(self.classes))\n final_label_tokens = self.convert_label(user_label=user_label)\n if final_label_tokens not in self.classes:\n self.classes.append(final_label_tokens)\n else:\n logger.debug(\"\\\"{}\\\" was already in the list!\", \" \".join(final_label_tokens))",
"def chooseClass(self):\n #global dictionary of classes with 0 values in a list (ex. [str,dex,con,int,wis,cha,hp,suggested specialty])\n classes = {'barbarian': [0,0,0,0,0,0,self.con+12,'reaper'],\n 'cleric':[0,0,0,0,0,0,self.con+8,'mystical healer'],\n 'druid':[0,0,0,0,0,0,self.con+8,'hedge magician'],\n 'fighter':[0,0,0,0,0,0,self.con+10,'reaper'],\n 'monk':[0,0,0,0,0,0,self.con+8,'skirmisher'],\n 'paladin':[0,0,0,0,0,0,self.con+10,'defender'],\n 'ranger':[0,0,0,0,0,0,self.con+10,'sharpshooter'],\n 'rogue':[0,0,0,0,0,0,self.con+6,'specialist'],\n 'wizard':[0,0,0,0,0,0,self.con+6,'hedge magician']\n }\n\n #Ask which class he/she would like\n chosen_class = raw_input(\"Which class would you like? Please choose from:\\nBarbarian, Cleric, Druid, Fighter, Monk, Paladin, Ranger, Rogue, Wizard \" ).lower() \n while chosen_class not in ['barbarian','cleric','druid','fighter','monk','paladin','ranger','rogue','wizard']:\n chosen_class = raw_input(\"\\nIncorrect input\\n\\nWhich class would you like? Please choose from:\\nBarbarian, Cleric, Druid, Fighter, Monk, Paladin, Ranger, Rogue, Wizard \" ).lower() \n print\n\n #Adds character class to Class object for use in print statements\n self.classType = chosen_class.title()\n \n \n\n #Class specific conditional statements. These update the various ability scores\n #in the classes variable\n if chosen_class == 'barbarian':\n barb_choice = raw_input('Would you like to boost (1) Strength or (2) Constitution? ')\n print\n while barb_choice not in ['1','2']:\n barb_choice = raw_input('Would you like to boost (1) Strength or (2) Constitution? ')\n print\n if barb_choice == '1':\n classes['barbarian'][0] = 1\n elif barb_choice == '2':\n classes['barbarian'][2] = 1\n elif chosen_class == 'cleric':\n clerc_choice = raw_input('Would you like to boost (1) Wisdom, (2) Strength, or (3) Constitution? ')\n print\n while clerc_choice not in ['1','2','3']:\n clerc_choice = raw_input('Would you like to boost (1) Wisdom, (2) Strength, or (3) Constitution? ')\n print\n if clerc_choice == '1':\n classes['cleric'][4] = 1\n elif clerc_choice == '2':\n classes['cleric'][0] = 1\n elif clerc_choice == '3':\n classes['cleric'][2] = 1\n elif chosen_class == 'druid':\n druid_choice = raw_input('Would you like to boost (1) Wisdom or (2) Constitution? ')\n print\n while druid_choice not in ['1','2']:\n druid_choice = raw_input('Would you like to boost (1) Wisdom or (2) Constitution? ')\n print\n if druid_choice == '1':\n classes['druid'][4] = 1\n elif druid_choice == '2':\n classes['druid'][2] = 1\n elif chosen_class == 'fighter':\n fight_choice = raw_input('Would you like to boost (1) Strength, (2) Dexterity, or (3) Constitution? ')\n print\n while fight_choice not in ['1','2','3']:\n fight_choice = raw_input('Would you like to boost (1) Strength, (2) Dexterity, or (3) Constitution? ')\n print\n if fight_choice == '1':\n classes['fighter'][0] = 1\n elif fight_choice == '2':\n classes['fighter'][1] = 1\n elif fight_choice == '3':\n classes['fighter'][2] = 1 \n elif chosen_class == 'monk':\n monk_choice = raw_input(\"Would you like to boost (1) Wisdom or (2) Dexterity? \")\n print\n while monk_choice not in ['1','2']:\n monk_choice = raw_input(\"Would you like to boost (1) Wisdom or (2) Dexterity? \")\n print\n if monk_choice == '1':\n classes['monk'][4] = 1\n elif monk_choice == '2':\n classes['monk'][1] = 1\n elif chosen_class == 'paladin':\n pal_choice = raw_input('Would you like to boost (1) Strength, (2) Constitution, or (3) Charisma? 
')\n print\n while pal_choice not in ['1','2','3']:\n pal_choice = raw_input('Would you like to boost (1) Strength, (2) Constitution, or (3) Charisma? ')\n print\n if pal_choice == '1':\n classes['paladin'][0] = 1\n elif pal_choice == '2':\n classes['paladin'][2] = 1\n elif pal_choice == '3':\n classes['paladin'][5] = 1\n elif chosen_class == 'ranger':\n rang_choice = raw_input('Would you like to boost (1) Strength, (2) Dexterity, or (3) Constitution? ')\n print\n while rang_choice not in ['1','2','3']:\n rang_choice = raw_input('Would you like to boost (1) Strength, (2) Dexterity, or (3) Constitution? ')\n print\n if rang_choice == '1':\n classes['ranger'][0] = 1\n elif rang_choice == '2':\n classes['ranger'][1] = 1\n elif rang_choice == '3':\n classes['ranger'][2] = 1\n elif chosen_class == 'rogue':\n rog_choice = raw_input('Would you like to boost (1) Strength, (2) Dexterity, or (3) Intelligence? ')\n print\n while rog_choice not in ['1','2','3']:\n rog_choice = raw_input('Would you like to boost (1) Strength, (2) Dexterity, or (3) Intelligence? ')\n print\n if rog_choice == '1':\n classes['rogue'][0] = 1\n elif rog_choice == '2':\n classes['rogue'][1] = 1\n elif rog_choice == '3':\n classes['rogue'][3] = 1\n elif chosen_class == 'wizard':\n wiz_choice = raw_input('Would you like to boost (1) Intelligence or (2) Constitution? ')\n print\n while wiz_choice not in ['1','2']:\n wiz_choice = raw_input('Would you like to boost (1) Intelligence or (2) Constitution? ')\n print\n if wiz_choice == '1':\n classes['wizard'][3] = 1\n elif wiz_choice == '2':\n classes['wizard'][2] = 1\n \n #Update base stats\n\n #A basic list full of the types of ability scores\n stats_list = ['str','dex','con','int','wis','cha','hp']\n #loops through the stats_list and adds all numbers to character's\n #starting stats\n for i in range(len(stats_list)):\n self.stealthUpdate(stats_list[i],classes[chosen_class][i])\n \n\n #modify hp if character is starting out higher than level 1\n def update_hp_for_higher_level(chosen_class,level):\n \"\"\"\n Helper function for chooseClass(). Updates character for\n levels greater than 1.\n \"\"\"\n #Checks to see if your character is level 4,8,12,etc.\n def upgradedAbilityAt4(level):\n if level % 4 == 0:\n upgraded_ability = raw_input(\"Level \"+str(level)+\"!\\n Which two abilities would you like to upgrade? 
(Adds +1 to ability)\\n Please input two from str/dex/con/int/wis/cha with a space in between.\\n (ex: cha dex) \").split(' ')\n print\n #To write:\n #if either ability pushes ability score over 20, redo input\n\n \n for i in upgraded_ability:\n self.stealthUpdate(i,1)\n #class specific HP calculations\n if chosen_class == 'barbarian': \n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,12) + self.con + self.classMods[6]\n elif chosen_class == 'cleric':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'druid':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'fighter':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'monk':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,8) + self.con + self.classMods[6]\n elif chosen_class == 'paladin':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'ranger':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,10) + self.con + self.classMods[6]\n elif chosen_class == 'rogue':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]\n elif chosen_class == 'wizard':\n for i in range(2,self.level+1):\n upgradedAbilityAt4(i)\n self.hp += r.randint(1,6) + self.con + self.classMods[6]\n \n\n if self.level > 1:\n update_hp_for_higher_level(chosen_class,self.level)",
"def set_class_list(self, L):\n\t\tself.class_list = L",
"def add_class(self, cls):\n self.commands.append(cls)",
"def save_class(self, a, class_name):\n logging.debug(\"in save class \" + class_name)\n self.produce(\"class_name\", class_name)\n self.classes.append(class_name)\n self.begin('')",
"def add_class(self, name):\n if name is not None and not self.has_class(name):\n self._cached_class.append(name)\n self._update_class()",
"def suggest_new_class_name(class_name):\n # TODO find all internal classes of this class, and use them to help generate suggestions\n if is_anonymous_class(class_name):\n # anonymous! leave this as is. (we'll just change the parent class)\n return None\n\n # get the list by count\n sorted_list = Counter(get_class_strings(class_name)).most_common()\n # pick the top contenders - take anything that has the same count as the the first in the list\n top_list = [element[0]\n for element in sorted_list if element[1] == sorted_list[0][1]]\n\n if not top_list:\n return None\n\n # remove bad chars\n BAD_CHARS = \":,/\\\\()[]$;#@!&^%*+'\\\"\"\n clean_top_list = []\n for top in top_list:\n for c in BAD_CHARS:\n top = top.replace(c,\"\")\n clean_top_list.append(top)\n top_list = clean_top_list\n \n # heuristics to pick between them - prefer things with dots, and things without \" \", and things that aren't empty\n if len(top_list) > 1:\n dot_list = [guess for guess in top_list if \".\" in guess]\n if dot_list:\n top_list = dot_list\n\n if len(top_list) > 1:\n no_space_list = [guess for guess in top_list if \" \" not in guess]\n if no_space_list:\n top_list = no_space_list\n\n # preference longer name\n top_list.sort(key=len, reverse=True)\n #print(top_list)\n new_name = top_list[0]\n\n # if there's a dot, split the name and take the last chunk\n if \".\" in new_name:\n new_name = new_name.split(\".\")[-1]\n\n # if there's a space, split the name and take the first chunk\n if \" \" in new_name:\n new_name = new_name.split(\" \")[0]\n\n # replace the old class name with this one\n # TODO handle internal classes (anonymous or otherwise)\n # should make sure, at minimum, they have the parent name in their symbol.\n package_elements, class_elements = split_class_name(class_name)\n\n class_elements[-1] = new_name\n new_name = \"L\" + \"/\".join(package_elements) + \\\n \"/\" + \"$\".join(class_elements) + \";\"\n\n return new_name.encode(\"utf-8\")",
"def addClassRef(clazz):\n\n global h_classes\n header = \"class %s;\" % clazz\n if not header in h_classes:\n h_classes.append(header)",
"def insert_classes(cursor):\n ranks = dict()\n with open(RANKS_PATH, encoding='UTF-8') as ranks_file:\n ranks_dict = ujson.load(ranks_file)\n for rank, ranked_archetypes in ranks_dict.items():\n try:\n rank = int(rank.strip(\"Rank\"))\n except ValueError:\n rank = MAX_RANK\n for ranked_classes in ranked_archetypes.values():\n for ranked_class in ranked_classes:\n ranks[ranked_class] = rank\n\n with open(CLASSES_PATH, encoding='UTF-8') as classes_file:\n classes_dict = ujson.load(classes_file)\n classes = list()\n # Get list of sorted classes\n sorted_classes_ids = list()\n for class_id in classes_dict.keys():\n if '_' in class_id:\n splited_class_id = class_id.split(\"_\", 1)\n sorted_classes_ids.append((class_id, int(splited_class_id[0].strip(\"Char\")), int(splited_class_id[-1])))\n else:\n sorted_classes_ids.append((class_id, 0, 0))\n sorted_classes_ids.sort(key=lambda tup: tup[2])\n sorted_classes_ids.sort(key=lambda tup: tup[1])\n # Start processing them\n for class_id, archetype, char_n in sorted_classes_ids:\n _class = classes_dict[class_id]\n class_info = list()\n # Get Class Name\n class_info.append(get_value(_class, \"Class\", \"name\", str))\n # Get Class Archetype\n class_info.append(get_archetype_id(get_value(_class, \"Class\", \"base\", str)))\n # Get Rank\n class_info.append(ranks.get(class_id, 0))\n # Get Icon\n class_info.append(format_icon(get_value(_class, \"Class\", \"icon\", str)))\n # Get Temp ID\n class_info.append(class_id)\n\n classes.append(tuple(class_info))\n\n classes = tuple(classes)\n\n cursor.executemany(\"INSERT INTO classes (name, archetype, rank, icon, temp_id) VALUES (?, ?, ?, ?, ?)\", classes)",
"def check_classes(class_name: str) -> str:\n classes_list = []\n class_directory = base_directory\n # Print out all classes in teh class_directory\n for i in os.listdir(class_directory):\n if i.startswith(\".\"):\n pass\n else:\n # Append name off classes to the list\n classes_list.append(i)\n # Check to see if the name of the class is in the list\n if class_name in classes_list:\n current_directory = os.path.join(base_directory, class_name, \"/\")\n return current_directory\n else:\n cprint(f\"{class_name} is not a class, creating new folder\", \"red\")\n new_directory = os.path.join(class_directory, class_name)\n os.mkdir(new_directory)\n cprint(f\"path {new_directory} created\", \"red\")\n return new_directory",
"def classes():\n print(\"\"\"Here are all the classes:\n barbarian, bard, cleric, druid, fighter, monk, paladin, ranger, rogue, sorcerer, warlock, wizard, blood hunter\"\"\")\n my_class = input('What class do you want to play as?')\n my_class = my_class.lower()\n if my_class in \"barbarian, bard, cleric, druid, fighter, monk, paladin, \" \\\n \"ranger, rogue, sorcerer, warlock, wizard, blood hunter\":\n return my_class\n else:\n print('That is not a class')\n return classes() # If they don't pick a listed class, re-run the function until they do",
"def prepare_classnames(self, start=None, add=None, exclude=None):\n classnames = start or []\n classnames.extend(add or [])\n return self.finalise_classname(classnames, exclude or [])",
"def process_class(self, parent, cls):\n if cls.typemap.flat_name in self.class_map:\n raise RuntimeError(\"process_class: class {} already exists in class_map\"\n .format(cls.typemap.flat_name))\n self.class_map[cls.typemap.flat_name] = cls\n for var in cls.variables:\n self.add_var_getter_setter(parent, cls, var)\n cls.functions = self.define_function_suffix(cls.functions)",
"def add_class(self, period_num, class_name):\n self._classes[period_num] = class_name",
"def increment_classes(self, num_classes):\n # Record number of input and output features for existing network\n in_features = self.model.fc.in_features\n out_features = self.model.fc.out_features\n # Record weight and bias values\n weight = self.model.fc.weight.data\n bias = self.model.fc.bias.data\n\n # If first task\n if self.seen_classes == 0:\n new_out_features = num_classes\n else:\n # Extend fc layer by number of new classes\n new_out_features = out_features + num_classes\n\n # Create new fc layer for the network\n self.model.fc = nn.Linear(in_features, new_out_features)\n\n # Init as per LwF paper\n kaiming_normal_init(self.model.fc.weight)\n # Reassign recorded weight and bias values to new layer\n self.model.fc.weight.data[:out_features] = weight\n self.model.fc.bias.data[:out_features] = bias\n # Increment number of classes\n self.n_classes += num_classes",
"def move_to_new_class(self, elements_to_move):\n for element in elements_to_move:\n place = self._place[element]\n place.delete()\n self.add_class(elements_to_move)",
"def suggest_new_names_for_all_classes_in_program(program=None, suggestions=None, class_path=None):\n if suggestions is None:\n suggestions = dict()\n\n # handle if we've already named these...\n for class_symbol in get_all_class_symbols_in_program(class_path=class_path, program=program):\n old_name = class_symbol.getName(True)\n \n excluded = False\n for excluded_path in EXCLUDED_NAMESPACES:\n if old_name.startswith(excluded_path):\n # don't care, skip\n excluded = True\n if excluded:\n continue\n \n old_name = ghidra_utils.SymbolDescriptor(old_name).to_java()\n\n # only run this for classes where we haven't already got a suggestion (e.g., from a previous program)\n if old_name not in suggestions:\n # only do this for proguarded classes\n if is_proguarded(old_name):\n new_name = suggest_new_class_name(old_name)\n\n #print(\"{}->{}\".format(old_name, new_name))\n suggestions[old_name] = new_name\n\n return suggestions",
"def update_classes(class_list_, champions_list_, class_counters_):\n logging.debug(\"Function update_classes() called\")\n\n class_counters_value_list = [0] * len(class_list_)\n for i, class_ in enumerate(class_list_): # looping over counters for every class\n logging.info(\"Current class: %s\", class_)\n for (\n champ\n ) in (\n champions_list_\n ): # for loop to assign how much champions are nonzero in class\n if champ.ChampCounter.get() >= 1:\n logging.info(\"Current champ with counter >=1: %s\", champ.name)\n if class_ in (champ.class_prim, champ.class_sec):\n logging.info(\n \"Current champ with counter >=1 match class Prim or Sec \\\n : %s or %s\",\n champ.class_prim,\n champ.class_sec,\n )\n class_counters_value_list[i] = class_counters_value_list[i] + 1\n logging.info(\n \"Number of nonzero champions in this class = %s\",\n class_counters_value_list[i],\n )\n class_counters_[i].set(class_counters_value_list[i])\n\n logging.debug(\"Function update_classes() end\")",
"def add_class(wire_version, cls, members):\n memid = 0\n\n sig = loxi_utils.class_signature(members)\n if cls in of_g.unified:\n uc = of_g.unified[cls]\n if wire_version in uc:\n debug(\"Error adding %s to unified. Wire ver %d exists\" %\n (cls, wire_version))\n sys.exit(1)\n uc[wire_version] = {}\n # Check for a matching signature\n for wver in uc:\n if type(wver) != type(0): continue\n if wver == wire_version: continue\n if not \"use_version\" in uc[wver]:\n if sig == loxi_utils.class_signature(uc[wver][\"members\"]):\n log(\"Matched %s, ver %d to ver %d\" % \n (cls, wire_version, wver))\n # have a match with existing version\n uc[wire_version][\"use_version\"] = wver\n # What else to do?\n return\n else: # Haven't seen this entry before\n log(\"Adding %s to unified list, ver %d\" % (cls, wire_version))\n of_g.unified[cls] = dict(union={})\n uc = of_g.unified[cls]\n\n # At this point, need to add members for this version\n uc[wire_version] = dict(members = members)\n\n # Per member processing:\n # Add to union list (I'm sure there's a better way)\n # Check if it's a list\n union = uc[\"union\"]\n if not cls in of_g.ordered_members:\n of_g.ordered_members[cls] = []\n for member in members:\n m_name = member[\"name\"]\n m_type = member[\"m_type\"]\n if m_name.find(\"pad\") == 0:\n continue\n if m_name in union:\n if not m_type == union[m_name][\"m_type\"]:\n debug(\"ERROR: CLASS: %s. VERSION %d. MEMBER: %s. TYPE: %s\" %\n (cls, wire_version, m_name, m_type))\n debug(\" Type conflict adding member to unified set.\")\n debug(\" Current union[%s]:\" % m_name)\n debug(union[m_name])\n sys.exit(1)\n else:\n union[m_name] = dict(m_type=m_type, memid=memid)\n memid += 1\n if not m_name in of_g.ordered_members[cls]:\n of_g.ordered_members[cls].append(m_name)",
"def editClass():\r\n noChangesMade = True #Keeps track if changes were made at all\r\n displayClassList(True)\r\n if len(classes) == 0: #if no classes, return\r\n return\r\n \r\n print(\"\\nWhich class would you like to edit?\")\r\n classIndex = input(\"Choice: \")\r\n while not properMenuChoice(classIndex):\r\n classIndex = input(\"Choice: \")\r\n if int(classIndex) == len(classes) + 1: #If the user selected the \"none\" option from displayClassList\r\n return\r\n classIndex = int(classIndex)\r\n \r\n class_ = classes[classIndex-1]\r\n print(\"\\nEnter the data for the class. Press <Enter/Return> to leave unchanged.\")\r\n \r\n newClassDay = input(\"Enter new day for class: \")\r\n if(newClassDay.islower() or newClassDay.isupper()): #If the user put in some kind of string\r\n while not properDayInput(newClassDay): #Make sure that it is a valid day of the week\r\n newClassDay = input(\"Please enter a day of the week: \")\r\n class_.setDay(Day(newClassDay))\r\n noChangesMade = False\r\n \r\n newClassName = input(\"Enter new name for class: \")\r\n if not newClassName.strip() == \"\": #Check for all blanks/spaces\r\n class_.setName(newClassName.upper())\r\n noChangesMade = False\r\n \r\n newStartTime = input(\"Enter new starting time for class: \")\r\n if not newStartTime.strip() == \"\": #Check for entry\r\n while not properTimeInput(newStartTime): #persist for proper entry\r\n newStartTime = input(\"Enter a valid new starting time (24 hr format): \")\r\n class_.setstartTime(newStartTime)\r\n noChangesMade = False\r\n \r\n newEndTime = input(\"Enter new ending time for class: \")\r\n if not newStartTime.strip() == \"\":\r\n while not properTimeInput(newEndTime):\r\n newEndTime = input(\"Enter a valid new ending time (24 hr format): \")\r\n class_.setendTime(newEndTime)\r\n noChangesMade = False\r\n \r\n if noChangesMade:\r\n print(\"\\n No Changes Made\")\r\n delay()\r\n else:\r\n print(\"\\nChanges Made\")\r\n delay()",
"def visit_ClassDef(self, node):\n if node in self.manager.found_classes:\n return\n\n self.manager.found_classes.add(node)\n self.manager.found[\"classes\"].append({\"name\":node.name,\n \"lineno\":node.lineno,\n \"namespace\":\".\".join(self.parent)})\n\n # Keep checking all nodes in this class.\n for my_node in node.body:\n self.manager._explorer(self.manager, self.parent + [node.name]).visit(my_node)",
"def pick_class(classes, sort=False, **kwargs):\n def _label(c):\n try:\n return c.LABEL\n except AttributeError:\n return c.__name__\n\n if sort:\n classes = sorted(classes, key=lambda x: _label(x))\n choices = [_label(c) for c in classes]\n return pick_item(classes, choices, **kwargs)",
"def add_class_name_field(data):\n\n for case in data:\n case_id = case['No.']\n filed_list = case_id.split('_')\n filed_list = list(map(lambda x: x.title(), filed_list))\n case['ClassName'] = ''.join(filed_list)",
"def dir_classify_n(lsorted, class_instance, class_dict,Lwrite=1):\n #print(classobj_dict_key)\n luse=[]\n ukeys=[] # used keys\n lsuff=['py','sh','csh','pl']\n for f in class_dict.__dict__.keys(): # as for keys == py_fname(.py)\n Ltag = False\n for suf in lsuff:\n fsuf = f + '.' + suf\n if fsuf in lsorted: # scan for keys\n luse.append(fsuf)\n lsorted.remove(fsuf) # remove key.py\n ukeys.append(f) # return key\n Ltag = True\n continue\n if Ltag == True:\n continue\n\n #print(f\" in dir_classify(): {f}\") # to print all the not-selected files\n ### classify modules used\n if luse:\n CLASS_instance = class_instance.upper()\n print(\" {:<10}::\".format(CLASS_instance))\n if Lwrite:\n for f in luse:\n print(f\" {f} \")\n return ukeys",
"def instantiate_classes(self, node):\n clslist = []\n for cls in node.classes:\n if cls.wrap_as == \"struct\":\n clslist.append(cls)\n options = cls.options\n if cls.wrap.python and options.PY_struct_arg == \"class\":\n self.add_struct_ctor(cls)\n self.process_class(node, cls)\n elif cls.template_arguments:\n orig_typemap = cls.typemap\n if orig_typemap.cxx_instantiation is None:\n orig_typemap.cxx_instantiation = {}\n # Replace class with new class for each template instantiation.\n # targs -> ast.TemplateArgument\n for i, targs in enumerate(cls.template_arguments):\n newcls = cls.clone()\n clslist.append(newcls)\n\n # If single template argument, use its name; else sequence.\n # XXX - maybe change to names\n # i.e. _int_double However <std::string,int> is a problem.\n if targs.fmtdict and 'template_suffix' in targs.fmtdict:\n class_suffix = targs.fmtdict['template_suffix']\n elif len(targs.asts) == 1:\n ntypemap = targs.asts[0].typemap\n if ntypemap.template_suffix:\n class_suffix = ntypemap.template_suffix\n else:\n class_suffix = \"_\" + ntypemap.flat_name\n else:\n class_suffix = \"_\" + str(i)\n\n # Update name of class.\n # name_api - vector_0 or vector_int (Fortran and C names)\n # name_instantiation - vector<int>\n if targs.fmtdict and \"cxx_class\" in targs.fmtdict:\n newcls.name_api = targs.fmtdict[\"cxx_class\"]\n else:\n newcls.name_api = cls.name + class_suffix\n newcls.name_instantiation = cls.name + targs.instantiation\n newcls.scope_file[-1] += class_suffix\n\n if targs.fmtdict:\n newcls.user_fmt.update(targs.fmtdict)\n if targs.options:\n newcls.options.update(targs.options)\n \n # Remove defaulted attributes then reset with current values.\n newcls.delete_format_templates()\n newcls.default_format()\n\n newcls.typemap = typemap.create_class_typemap(newcls)\n if targs.instantiation in orig_typemap.cxx_instantiation:\n print(\"instantiate_classes: {} already in \"\n \"typemap.cxx_instantiation\".format(targs.instantiation))\n orig_typemap.cxx_instantiation[targs.instantiation] = newcls.typemap\n\n self.template_typedef(newcls, targs)\n\n self.push_instantiate_scope(newcls, targs)\n self.process_class(newcls, newcls)\n self.pop_instantiate_scope()\n else:\n clslist.append(cls)\n self.process_class(cls, cls)\n\n node.classes = clslist",
"def add_class(self, klass):\n if not issubclass(klass, DataClayObject):\n raise DataClayException(\"Can only use DataClayObject classes\")\n\n logger.debug(\"Adding class %s to the MetaClassFactory\", klass)\n class_container = klass._prepare_metaclass(self._namespace, self._responsible_account)\n\n # Save to the list, and bookmark the MetaClass\n # (for valid recursive behaviour, e.g. cycles)\n complete_name = class_container.name\n logger.debug(\"[add_class] Using `%s` as `name` field of Type\", complete_name)\n if complete_name not in self.types:\n self.types[complete_name] = UserType(\n signature=\"L{};\".format(complete_name).replace(\".\", \"/\"),\n includes=[],\n namespace=self._namespace,\n typeName=complete_name,\n )\n self.classes.append(class_container)\n\n parent = klass.__bases__[0]\n if parent is not DataClayObject:\n self.add_class(parent)\n\n logger.debug(\"Class %s finished\", class_container.name)",
"def add(self, *args, **kwargs):\n obj = self._class(*args, **kwargs)\n self._items.append(obj)",
"def _add_class_to_map(self, model_key, class_dict):\n # Extract the class name.\n class_name = class_dict['class']\n\n try:\n # Attempt to access this class by name in the map.\n self.model_map['class'][class_name]\n except KeyError:\n # Class object does not exist. Map it.\n self.model_map['class'][class_name] = [model_key, class_dict]\n else:\n # This class name already exists, which will lead to\n # duplicates and failure.\n raise ItemExistsError('Class {} already exists in the map.'\n .format(class_name))"
]
| [
"0.7390039",
"0.6293409",
"0.6051495",
"0.5877191",
"0.58644205",
"0.58146465",
"0.5810419",
"0.57066",
"0.56604487",
"0.5597816",
"0.5594408",
"0.5591665",
"0.5577792",
"0.55187804",
"0.546726",
"0.54580706",
"0.54314536",
"0.5428655",
"0.53475636",
"0.5345708",
"0.53414875",
"0.5338418",
"0.5294596",
"0.5290662",
"0.52368",
"0.52226526",
"0.5203194",
"0.52029276",
"0.5185247",
"0.51724976"
]
| 0.70700693 | 1 |
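The addClass document in the record above leans on two input validators, properDayInput and properTimeInput, that are not defined anywhere in this dump. The following is a minimal sketch of what they are assumed to do, given the 24-hour HHMM convention the prompts describe; the names come from the call sites above, but the bodies are guesses rather than the original implementations:

def properDayInput(day):
    #Accept only a full day-of-week name, case-insensitive.
    valid_days = {"monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday"}
    return day.strip().lower() in valid_days

def properTimeInput(timeEntry):
    #Accept only a 4-digit 24-hour time such as "0800" or "1300".
    if len(timeEntry) != 4 or not timeEntry.isdigit():
        return False
    hours, minutes = int(timeEntry[:2]), int(timeEntry[2:])
    return 0 <= hours <= 23 and 0 <= minutes <= 59

With validators along these lines, addClass keeps prompting until both the day and each time parse cleanly before it builds the Class object.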
Method edit class accepts user inputs to change fields of a user-selected class in the list | def editClass():
    noChangesMade = True #Keeps track if changes were made at all
    displayClassList(True)
    if len(classes) == 0: #if no classes, return
        return

    print("\nWhich class would you like to edit?")
    classIndex = input("Choice: ")
    while not properMenuChoice(classIndex):
        classIndex = input("Choice: ")
    if int(classIndex) == len(classes) + 1: #If the user selected the "none" option from displayClassList
        return
    classIndex = int(classIndex)

    class_ = classes[classIndex-1]
    print("\nEnter the data for the class. Press <Enter/Return> to leave unchanged.")

    newClassDay = input("Enter new day for class: ")
    if not newClassDay.strip() == "": #If the user entered something, validate it before updating the day
        while not properDayInput(newClassDay): #Make sure that it is a valid day of the week
            newClassDay = input("Please enter a day of the week: ")
        class_.setDay(Day(newClassDay))
        noChangesMade = False

    newClassName = input("Enter new name for class: ")
    if not newClassName.strip() == "": #Check for all blanks/spaces
        class_.setName(newClassName.upper())
        noChangesMade = False

    newStartTime = input("Enter new starting time for class: ")
    if not newStartTime.strip() == "": #Check for entry
        while not properTimeInput(newStartTime): #persist for proper entry
            newStartTime = input("Enter a valid new starting time (24 hr format): ")
        class_.setstartTime(newStartTime)
        noChangesMade = False

    newEndTime = input("Enter new ending time for class: ")
    if not newEndTime.strip() == "": #Check for entry on the ending time
        while not properTimeInput(newEndTime):
            newEndTime = input("Enter a valid new ending time (24 hr format): ")
        class_.setendTime(newEndTime)
        noChangesMade = False

    if noChangesMade:
        print("\n No Changes Made")
        delay()
    else:
        print("\nChanges Made")
        delay() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit(self):\n\n pass",
"def edit(self, **kwargs):\n ...",
"def edit():",
"def edit_contact(self):\n edit_data = input(\"Enter the first name of user you want to edit\\n\")\n\n for contact in self.contact_list:\n if contact.first_name == edit_data:\n user_input = int(input(\n \"Enter the number that you want to edit field in details\"\n \" \\n 1. First Name 2. Last name 3. Address 4. city 5. state 6.zip 7. Phone number 8.Email \\n\"))\n if user_input == 1:\n first_name = input(\"Enter new first name\\n\")\n contact.first_name = first_name\n elif user_input == 2:\n last_name = input(\"Enter new last name\\n\")\n contact.last_name = last_name\n elif user_input == 3:\n address = input(\"Enter new address\\n\")\n contact.address = address\n elif user_input == 4:\n city = input(\"Enter new city\\n\")\n contact.city = city\n elif user_input == 5:\n state = input(\"Enter new state\\n\")\n contact.state = state\n elif user_input == 6:\n zip = input(\"Enter new zip\\n\")\n contact.zip = zip\n elif user_input == 7:\n phone_number = input(\"Enter new phone number\\n\")\n contact.phone_number = phone_number\n elif user_input == 8:\n email = input(\"Enter new email\\n\")\n contact.email = email\n else:\n print(\"Please enter a valid input\")\n else:\n print(\"Please enter a valid name\")",
"def on_edit(self, dataobj):",
"def changeClass(self, newClass):\n\t\turl = \"https://habitica.com/api/v3/user/change-class?class=\" + newClass\n\t\treturn(postUrl(url, self.credentials))",
"def update(self):\n\t\tprint(\"Editing %s '%s'\" % (self.getSpecString(), self.getName()))\n\t\tchoice = None\n\t\twhile (choice != 5):\n\t\t\tchoice = None \t\n\t\t\twhile (choice != 1 and choice != 2 and choice != 3 and choice != 4 and choice != 5):\n\t\t\t\tprint(\"Please select an action\")\n\t\t\t\tprint(\" 1) Edit name\")\n\t\t\t\tprint(\" 2) Edit description\")\n\t\t\t\tprint(\" 3) Add item\")\n\t\t\t\tprint(\" 4) Remove item\")\n\t\t\t\tprint(\" 5) Save and exit\")\n\t\t\t\tchoice = self.askForInteger(\"Action\")\n\n\t\t\t\tif (choice != 1 and choice != 2 and choice != 3 and choice != 4 and choice != 5):\n\t\t\t\t\tprint(\"Invalid choice!\")\n\n\t\t\tif (choice == 1):\n\t\t\t\tself.setName(self.askForString(\"You erase the list's title and write\"))\n\t\t\telif (choice == 2):\n\t\t\t\tself.setDescription(self.askForString(\"You update the list's description to read\"))\n\t\t\telif (choice == 3):\n\t\t\t\tself.addItem(self.askForString(\"Add to list\"))\n\t\t\telif (choice == 4):\n\t\t\t\tprint(self.getAllItemsStr())\n\t\t\t\tremoveIndex = self.askForInteger(\"Remove entry\")\n\t\t\t\tprint(\"Removing %s...\" % (self.items[removeIndex - 1]))\n\t\t\t\tself.removeItem(removeIndex - 1)\n\t\t\telif (choice == 5):\n\t\t\t\tprint(\"Saving %s...\" % self.getSpecString())\n\t\t\t\tself.setUpdatedAt(datetime.datetime.now())\n\t\t\t\tself.refreshYAML()\n\t\t\t\tprint(\"Saved!\")",
"def do_update(self, arg):\n if len(arg) == 0:\n print(\"** class name missing **\")\n return\n coms = tuple(arg.split())\n if coms[0] not in self.cls:\n print(\"** class doesn't exist **\")\n elif len(coms) < 2:\n print(\"** instance id missing **\")\n return\n obj = coms[0] + \".\" + coms[1]\n if obj not in storage.all().keys():\n print(\"** no instance found **\")\n elif len(coms) < 3:\n print(\"** attribute name missing **\")\n elif len(coms) < 4:\n print(\"** value missing **\")\n else:\n typecast = type(eval(coms[3]))\n form = coms[3].strip('\"')\n form = form.strip(\"'\")\n setattr(storage.all()[obj], coms[2], typecast(form))",
"def on_edit_students_select(self):\n edit_window = Students()\n edit_window.exec_()",
"def edit_object(self,item, field_name, new_value):\n\t\treturn self.item_edit_field(item=item, field_name=field_name, new_value=new_value)",
"def do_edit(self, args):\n member = None\n rowid = args.split(' ')[0]\n \n # loop till we get a rowid which matches a member in the database\n while True:\n rowid = self.validateRowid(rowid)\n if rowid is None:\n rowid = input('Enter member id: ')\n continue\n \n member = self.roster.get(rowid)\n if member is None:\n print(\"No member with id of %d\" % rowid)\n # rowid will get validated again, but it's the same value\n # which already passed validation\n continue\n \n break\n \n print('Editing %s %s' % (member.first, member.last))\n print('Type new value, hit enter to keep current value, or enter spaces to clear a value')\n member.first = self.getNewValue('First name', member.first)\n member.last = self.getNewValue('Last name', member.last)\n member.introducedDate = self.getNewValue('introduced date', member.introducedDate) \n \n self.roster.update(member)",
"def edit_user(self):\n from editWindow import EditPlayer\n self.edit = EditPlayer(self.lang, self.result_table.currentItem().text())\n self.edit.show()",
"def edit(self,edits):\n\t\tself.alphanumeric=edits['alphanumeric'] if 'alphanumeric' in edits else None\n\t\tself.alphanumeric_color = edits['alphanumeric_color'] if 'alphanumeric_color' in edits else None\n\t\tif self.alphanumeric_color ==\"grey\":\n\t\t\tself.alphanumeric_color = \"gray\"\n\t\tself.background_color = edits['background_color'] if 'background_color' in edits else None\n\t\tif self.background_color == \"grey\":\n\t\t\tself.background_color = \"gray\";\n\t\tshapeChoices = dict((x,y) for x,y in Target.SHAPE_CHOICES)\n\t\tself.shape = str(shapeChoices[edits['shape']]) if 'shape' in edits else None\n\t\tself.orientation = edits['orientation'] if 'orientation' in edits else None\n\t\tself.ptype = edits['ptype']\n\t\tself.description = edits['description'] if 'description' in edits else None\n\t\tself.save()",
"def DoEdit(self,event):\r\n raise UncodedError",
"def edit_tools(self, e):\n #GETTING SELECTION\n\n self.selected_item = self.user_inventory.selection()\n self.select_name = self.user_inventory.item([i for i in self.selected_item], \"values\")[0]\n self.select_entdate = self.user_inventory.item([i for i in self.selected_item], \"values\")[3]\n\n self.df_same_name = self.df_user.query(\"title == @self.select_name\")\n #this is the selected one for sure\n self.df_the_selected_item = self.df_same_name.loc[self.df_same_name[\"entry date\"] == self.select_entdate]\n\n #GETTING THE INDEX NUMBER OF THE SELECTION IN .CSV FILE\n self.index_select = self.df_the_selected_item.index\n self.index_select_number = self.index_select.tolist()\n\n #bottom buttons appear:\n self.changing_item_label.config(text=\"Now editing \"+self.select_name+\" that added on \"+self.select_entdate+\":\")\n\n self.delete_but = Button (self.bottom_frame, text=\"DELETE\", command=self.delete_button)\n self.delete_but.place(relx=0.1, rely=0.7, relwidth=0.28, anchor=\"w\")\n\n self.servings_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n self.serv_drop = Combobox(self.bottom_frame, value=self.servings_list, state=\"readonly\")\n self.serv_drop.place(relx=0.5, rely=0.7, relwidth=0.2, anchor=CENTER)\n\n \n self.serv_but = Button(self.bottom_frame, text=\"CHANGE AMOUNT\", command=self.change_amount_button, state=\"disabled\")\n self.serv_but.place(relx=0.9, rely=0.7, relwidth=0.28, anchor=\"e\")\n\n self.serv_drop.bind(\"<<ComboboxSelected>>\", self.activate_button)",
"def _edit_user(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is as follows: \")\n for i in users:\n print(users[i][\"name\"])\n #List specific user's settings and get user id\n userID = self._list_user_settings(users)\n #Loop until valid option given\n option = False\n while not option:\n option = input(\"Please enter the setting you would like to change: \")\n if option not in users[userID]:\n option = False\n print(\"That setting is not valid.\")\n #Get input for new setting\n args = input(\"Please enter what you would like to change that setting to: \")\n #Output\n command = \"edit_user {0} {1} {2}\\r\\n\".format(userID, option, args)\n return(command)",
"def do_update(self, *args):\n if len(args) == 1:\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) < 2:\n print(\"** instance id missing **\")\n return\n elif len(args) < 3:\n print(\"** attribute name missing **\")\n return\n elif len(args) < 4:\n print(\"** value missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n obj = dict_objs[key]\n if args[2] in obj.__class__.__dict__:\n obj.__dict__[args[2]] =\\\n type(obj.__class__.__dict__[args[2]])(args[3])\n else:\n obj.__dict__[args[2]] = args[3]\n storage.save()\n else:\n print(\"** no instance found **\")",
"def showEditContact(self):",
"def getEditForm( self ):\n return \"listc_edit\"",
"def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n #pks = self.provider.get_primary_fields(self.model)\n \n log.debug(\"soyRomperLB= %s\" %kw)\n\n ###########################################\n pks = self.provider.get_primary_fields(self.model)\n \n ###########################################\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n return dict(value=value, model=self.model.__name__, pk_count=len(pks))",
"def dummy():\n\t\t\tself.edit = True",
"def home_edituser():\n\tpass",
"def edit_button_clicked(self, obj):\n handle = self.get_selected()\n if handle:\n note = self.dbstate.db.get_note_from_handle(handle)\n try:\n from .. import EditNote\n EditNote(self.dbstate, self.uistate, self.track, note,\n callertitle = self.callertitle,\n extratype = [self.notetype] )\n except WindowActiveError:\n pass",
"def update_command():\n # global selected_tuple\n backend.update(selected_tuple[0], \n title_text.get(), \n author_text.get(), \n year_text.get(), \n isbn_text.get())",
"def on_directory_list_row_activated(self, *args):\n\t\tself.on_button_edit_clicked(self.buttonEdit)",
"def on_Add_new_class_button_clicked(self):\n Add_new_class = Shangke_message()\n Add_new_class.exec_()\n result = Shangke_message.result\n # print(result)\n self.lineEdit.setText(result['Cname'])\n self.lineEdit_2.setText(result['Sclass'])\n self.lineEdit_3.setText(result['ClassTime'])\n self.lineEdit_4.setText(result['Tno'])\n self.lineEdit_6.setText(result['Date'])",
"def EditLabel(self, item):\r\n \r\n self.Edit(item)",
"def on_edit_clicked(self, obj):\n store, node = self.list.selection.get_selected()\n if not node:\n return\n \n name = cuni(self.list.model.get_value(node, 0))\n if name == _('default'): # the default style cannot be edited\n return\n style = self.sheetlist.get_style_sheet(name)\n StyleEditor(name, style, self)",
"def edit(self, new_content: str) -> None:\n\n # YOUR CODE HERE\n self.content = new_content",
"def OnInfoEdit(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n item = self.items[selections[0]]\r\n if self.gInfoBox.IsModified():\r\n self.data.setInfo(item,self.gInfoBox.GetValue())"
]
| [
"0.6598314",
"0.6395565",
"0.6206384",
"0.59057254",
"0.58150303",
"0.5813324",
"0.5805739",
"0.57658625",
"0.57134074",
"0.5709039",
"0.5703661",
"0.5579027",
"0.5497685",
"0.54963166",
"0.5485881",
"0.54812586",
"0.5473928",
"0.54522747",
"0.54284304",
"0.5409104",
"0.5364954",
"0.5345945",
"0.53292114",
"0.53251714",
"0.53245425",
"0.5314053",
"0.5312079",
"0.5283767",
"0.52770066",
"0.5267206"
]
| 0.69180167 | 0 |
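Both editClass above and deleteClass in the next record call displayClassList(True) and properMenuChoice, neither of which appears in this dump. Below is a rough sketch under the assumption that displayClassList prints a numbered menu (optionally ending with a "None" escape entry) and that properMenuChoice accepts any integer inside that menu's range; the module-level classes list is assumed, and none of this is confirmed by the source:

def displayClassList(includeNoneOption):
    #Print a numbered menu of the current classes; optionally add a final "None" entry.
    if len(classes) == 0:
        print("\nThere are no classes\n")
        return
    for index, class_ in enumerate(classes, start=1):
        print(str(index) + ") " + str(class_))
    if includeNoneOption:
        print(str(len(classes) + 1) + ") None")

def properMenuChoice(choice):
    #Accept only an integer choice that falls inside the displayed menu range.
    return choice.isdigit() and 1 <= int(choice) <= len(classes) + 1

This pairing explains the len(classes) + 1 check in editClass and deleteClass: that extra index is the "None" escape entry in the menu.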
Method delete class deletes a user-selected class from the ongoing list of classes | def deleteClass():
    displayClassList(True)
    if len(classes) == 0:
        return
    print("\nWhich class would you like to delete?")
    classIndex = input("Choice: ")
    while not properMenuChoice(classIndex):
        classIndex = input("Please enter a valid menu choice: ")
    if int(classIndex) == len(classes) + 1: #Return if choice is None from displayClassList
        return
    classIndex = int(classIndex)
    className = classes[classIndex-1].getName()
    classDay = classes[classIndex-1].getDay()
    del classes[classIndex-1]
    print("\nDeleted " + className + " on " + str(classDay))
    delay() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self, class_name):\n\n return self.client.delete(Classes.PATH_CLASS_TEMPLATE.format(class_name=class_name))",
"def do_destroy(self, arg):\n if len(arg) == 0:\n print(\"** class name missing **\")\n return\n coms = tuple(arg.split())\n if coms[0] not in self.cls:\n print(\"** class doesn't exist **\")\n elif len(coms) < 2:\n print(\"** instance id missing **\")\n else:\n obj = coms[0] + \".\" + coms[1]\n if obj not in storage.all().keys():\n print(\"** no instance found **\")\n else:\n del storage.all()[obj]\n storage.save()",
"def delete_from(class_reference, custom_condition='', **attr_dict):\n _entries = select_from(class_reference, custom_condition=custom_condition, **attr_dict) \n _del = 0\n for _entry in _entries:\n _entry.delete()\n _del += 1\n return _del",
"def do_destroy(self, *args):\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) != 2:\n print(\"** instance id missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n del dict_objs[key]\n storage.save()\n else:\n print(\"** no instance found **\")",
"def do_destroy(self, args):\n args = args.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n if len(args) == 1:\n print(\"** instance id missing **\")\n return\n if args[0] not in HBNBCommand.class_check:\n print(\"** class doesn't exist **\")\n return\n\n all_objs = storage.all()\n key = args[0] + '.' + args[1]\n if key in all_objs:\n all_objs.pop(key)\n storage.save()\n else:\n print(\"** no instance found **\")",
"def clear_class(self) -> None:\n logging.info(f\"clear element class. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.removeAttribute(\"class\");\"\"\"\n self._execute_javascript(js)",
"def do_destroy(self, arg):\n arg = arg.split()\n try:\n args = arg[0] + \".\" + arg[1]\n except:\n pass\n objects = storage.all()\n if len(arg) is 0:\n print(\"** class name missing **\")\n elif len(arg) == 1 and arg[0] in self.dict.keys():\n print(\"** instance id missing **\")\n elif arg[0] not in self.dict.keys():\n print(\"** class doesn't exist **\")\n elif args not in objects:\n print(\"** no instance found **\")\n else:\n del objects[args]\n storage.save()",
"def do_destroy(self, arg):\n args = arg.split()\n if len(args) == 0:\n print(\"** class name missing **\")\n return\n elif len(args) < 2 and args[0] in self.class_dict:\n print(\"** instance id missing **\")\n return\n elif len(args) < 2:\n print(\"** class name missing **\")\n return\n\n object_dict = storage.all()\n if args[0] in self.class_dict:\n for full_key in object_dict:\n key = full_key.split(\".\")\n if key[1] == args[1]:\n del object_dict[full_key]\n storage.save()\n return\n print(\"** no instance found **\")\n else:\n print(\"** class doesn't exist **\")",
"def do_destroy(self, arg):\n args = shlex.split(arg)\n if len(args) == 0:\n print(\"** class name missing **\")\n elif args[0] in class_type:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in models.storage.all():\n models.storage.all().pop(key)\n models.storage.save()\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")",
"def do_destroy(self, line):\n list_line = line.split(' ')\n if line == \"\":\n print(\"** class name missing **\")\n elif list_line[0] not in HBNBCommand.classes.keys():\n print(\"** class doesn't exist **\")\n elif len(list_line) < 2:\n print(\"** instance id missing **\")\n elif list_line[0] + '.' + list_line[1] not in \\\n models.storage.all().keys():\n print(\"** no instance found **\")\n else:\n models.storage.all().pop(list_line[0] + '.' + list_line[1], None)\n models.storage.save()",
"def do_destroy(self, args):\n args = shlex.split(args)\n if len(args) == 0:\n print(\"** class name missing **\")\n return False\n if args[0] in classes:\n if len(args) > 1:\n key = args[0] + \".\" + args[1]\n if key in models.storage.all():\n models.storage.all().pop(key)\n models.storage.save()\n else:\n print(\"** no instance found **\")\n else:\n print(\"** instance id missing **\")\n else:\n print(\"** class doesn't exist **\")",
"def delete_obj_class(self, obj_class_name: str) -> ProjectMeta:\n return self.delete_obj_classes([obj_class_name])",
"def do_destroy(self, arg):\n arg_list = arg.split(\" \") if type(arg) == str else arg\n if not arg:\n print(\"** class name missing **\")\n return\n if arg_list[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n return\n if len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n key = arg_list[0] + \".\" + arg_list[1]\n if key in storage.all():\n del storage.all()[key]\n storage.save()\n return\n print(\"** no instance found **\")",
"def delete(self):\n ...",
"def do_destroy(self, line):\n try:\n tokens = split(line)\n except ValueError:\n return None\n if len(tokens) < 1:\n print(\"** class name missing **\")\n else:\n objects = models.storage.all()\n cls = models.getmodel(tokens[0])\n if cls is None:\n print(\"** class doesn't exist **\")\n elif len(tokens) < 2:\n print(\"** instance id missing **\")\n elif \".\".join(tokens[:2]) not in objects:\n print(\"** no instance found **\")\n else:\n del objects[\".\".join(tokens[:2])]\n models.storage.save()",
"def help_destroy(self):\n print(\"delete an instance based on the class name and id\")",
"def do_destroy(self, line):\n args = line.split()\n\n if not args:\n print(\"** class name missing **\")\n elif args[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n elif len(args) < 2:\n print(\"** instance id missing **\")\n else:\n key = args[0] + \".\" + args[1]\n dict_objects = storage.all()\n obj = dict_objects.get(key)\n if obj:\n dict_objects.pop(key)\n storage.save()\n else:\n print(\"** no instance found **\")",
"def deleteClass(X,y,num,c):\n\t\n\ttwoIndex=np.array([i for i in range(len(y)) if y[i]==c])\n\tnp.random.shuffle(twoIndex)\n\n\tif num >= 0.7*len(twoIndex):\n\t\tprint('Number of examples requested for delete too many...')\n\t\texit()\n\n\n\tdelIndex=twoIndex[0:num]\n\n\tX=np.delete(X,delIndex,0)\n\ty=np.delete(y,delIndex,0)\n\n\tprint(X.shape,y.shape)\n\n\treturn(X,y)",
"def delete_vs_class(vs_class_name, created_objects):\r\n if keep_objects:\r\n return\r\n custom_object_api_instance = client.CustomObjectsApi()\r\n try:\r\n custom_object_api_response = custom_object_api_instance.delete_cluster_custom_object(\r\n group=\"snapshot.storage.k8s.io\",\r\n version=\"v1\",\r\n plural=\"volumesnapshotclasses\",\r\n name=vs_class_name\r\n )\r\n LOGGER.debug(custom_object_api_response)\r\n LOGGER.info(f\"Volume Snapshot Class Delete : {vs_class_name} deleted\")\r\n created_objects[\"vsclass\"].remove(vs_class_name)\r\n except ApiException as e:\r\n LOGGER.error(f\"Exception when calling CustomObjectsApi->delete_cluster_custom_object_0: {e}\")\r\n clean_with_created_objects(created_objects)\r\n assert False",
"def delete_menu():",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete_classification_head(self) -> None:\n del self.model.classifier",
"def do_destroy(self, args):\n args = shlex.split(args)\n dicti = storage.all()\n if not args:\n print(\"** class name missing **\")\n elif not args[0] in name_of_class:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif \"{}.{}\".format(args[0], args[1]) in dicti:\n dicti.pop(\"{}.{}\".format(args[0], args[1]))\n storage.save()\n else:\n print(\"** no instance found **\")",
"def delete_fixture_class(self, class_id):\n with self._class_lock:\n existing_fix = self.instances.find_one({'class_id': class_id, 'status': {'$ne': InstanceStatus.DELETED}})\n if existing_fix:\n raise AXIllegalOperationException(\"Fixtures belonging to class {} should be deleted prior to removal\"\n .format(existing_fix['class_name']))\n self.axdb_client.delete_fixture_class(class_id)\n logger.info(\"Deleted %s\", class_id)\n return class_id",
"def deleteSelectedSegs(self):\n inds = []\n for ix in range(len(self.picbuttons)):\n if self.picbuttons[ix].mark == 'yellow':\n inds.append(ix)\n\n if len(inds)==0:\n print(\"No segments selected\")\n return\n\n self.segsChanged = True\n for ix in reversed(inds):\n del self.segments[ix]\n del self.picbuttons[ix]\n\n # update self.clusters, delete clusters with no members\n todelete = []\n for ID, label in self.clusters.items():\n empty = True\n for seg in self.segments:\n if seg[-1] == ID:\n empty = False\n break\n if empty:\n todelete.append(ID)\n\n self.clearButtons()\n\n # Generate new class labels\n if len(todelete) > 0:\n keys = [i for i in range(self.nclasses) if i not in todelete] # the old keys those didn't delete\n # print('old keys left: ', keys)\n\n nclasses = self.nclasses - len(todelete)\n max_label = nclasses - 1\n labels = []\n c = self.nclasses - 1\n while c > -1:\n if c in keys:\n labels.append((c, max_label))\n max_label -= 1\n c -= 1\n\n labels = dict(labels)\n # print(labels)\n\n # update clusters dictionary {ID: cluster_name}\n clusters = {}\n for i in keys:\n clusters.update({labels[i]: self.clusters[i]})\n\n print('before delete: ', self.clusters)\n self.clusters = clusters\n print('after delete: ', self.clusters)\n\n # update the segments\n for seg in self.segments:\n seg[-1] = labels[seg[-1]]\n\n self.nclasses = nclasses\n\n # redraw the buttons\n self.updateButtons()\n self.completeChanged.emit()",
"def unregister_insert(self, class_registered):\r\n insert_class = class_registered(self)\r\n\r\n if self.insert_classes.get(insert_class.base_url()):\r\n self.insert_classes.pop(insert_class.base_url())",
"def deleteClass(X, y, num, c):\n\n twoIndex = np.array([i for i in range(len(y)) if y[i] == c])\n np.random.shuffle(twoIndex)\n\n delIndex = twoIndex[0:num]\n\n X = np.delete(X, delIndex, 0)\n y = np.delete(y, delIndex, 0)\n\n return(X, y)"
]
| [
"0.6809871",
"0.6432882",
"0.63366604",
"0.63275844",
"0.62082964",
"0.61836684",
"0.6160332",
"0.6156988",
"0.6125214",
"0.61221415",
"0.6102454",
"0.6061765",
"0.6025444",
"0.5985825",
"0.59802514",
"0.5950053",
"0.5929951",
"0.59204936",
"0.58836925",
"0.58612436",
"0.58529115",
"0.58529115",
"0.58529115",
"0.58529115",
"0.5816843",
"0.5805205",
"0.5795918",
"0.5781962",
"0.5781226",
"0.577475"
]
| 0.76067334 | 0 |
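The documents in this section (addClass, editClass, deleteClass, and printSchedule in the record that follows) all depend on Class and Day types whose definitions are not part of the dump. The sketch below is a hypothetical reconstruction inferred from the call sites only: __eq__ is read as "same day with overlapping times" so the conflict check in addClass makes sense, __lt__ orders by weekday and then starting time so classes.sort() arranges the week, times are assumed to be zero-padded HHMM strings, and the scheduleString column widths are only a guess at printSchedule's table layout:

class Day:
    ORDER = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]

    def __init__(self, name):
        self.name = name.strip().capitalize()

    def __eq__(self, other):
        return self.name == other.name

    def __lt__(self, other):
        return Day.ORDER.index(self.name) < Day.ORDER.index(other.name)

    def __str__(self):
        return self.name

class Class:
    def __init__(self, name, day, startTime, endTime):
        self.name = name
        self.day = day
        self.startTime = startTime
        self.endTime = endTime

    def getName(self): return self.name
    def getDay(self): return self.day
    def setName(self, name): self.name = name
    def setDay(self, day): self.day = day
    def setstartTime(self, startTime): self.startTime = startTime
    def setendTime(self, endTime): self.endTime = endTime

    def __eq__(self, other):
        #Treat two classes as conflicting when they share a day and their HHMM ranges overlap.
        return (self.day == other.day
                and self.startTime < other.endTime
                and other.startTime < self.endTime)

    def __lt__(self, other):
        #Sort by weekday first, then by starting time, so classes.sort() orders the schedule.
        if self.day == other.day:
            return self.startTime < other.startTime
        return self.day < other.day

    def scheduleString(self):
        #Column widths are a guess intended to line up with printSchedule's header row.
        return " {0:<11}{1:<16}{2}-{3}".format(str(self.day), self.name, self.startTime, self.endTime)

    def __str__(self):
        return self.name + " on " + str(self.day)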
Method printSchedule prints a formatted schedule to the Terminal | def printSchedule():
    print("{0:^45}".format("Your Schedule:\n"))
    print(" Day Class Time")
    if len(classes) == 0:
        print("\nThere are no classes\n")
        return
    for class_ in classes:
        print(class_.scheduleString())
    print() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"",
"def print_schedule(self):\n for entry in self.entries:\n print(entry.get_entry_string())",
"def print_list(schedule):\n START_TIME = 0\n END_TIME = 1\n MEETING_TITLE = 2\n print(\"\\nYour schedule for the day:\")\n if len(schedule) == 0:\n print(\"(empty)\\n\")\n else:\n for row in schedule:\n print(\n f\"{row[START_TIME]} - {row[END_TIME]} {row[MEETING_TITLE]}\")\n print(\"\\n\")",
"def printEnumeratedSchedule(dictSchedule):\n for i, (strStop, val) in enumerate(dictSchedule.items()):\n print(strStop, *val, sep=\":\")",
"def printSchedule(dictBusSchedule):\n\n strPrint = \"\"\n for strStop in dictBusSchedule:\n strPrint = \"\"\n lstTimes = dictBusSchedule[strStop]\n for strTime in lstTimes:\n if strPrint == \"\":\n strPrint = strStop + \": \" + strTime\n else:\n strPrint = strPrint + \":\" + strTime\n\n print(strPrint)",
"def schedule_text():",
"def print_schedule():\n clear_screen()\n print(\"====Current Schedule====\")\n days = ['sun', 'mon', 'tues', 'wed', 'thurs', 'fri', 'sat']\n with open('current_courses.json', 'r') as current_file:\n schedule = json.load(current_file)\n for day in days:\n for val, val2 in schedule.items():\n if day in val2[0]:\n print(day, val, str(val2[1])+'-'+str(val2[2])+\" Presumed Grade: \"+ val2[3])\n return 0",
"def print_team_schedule(\n sch: Schedule,\n team: str,\n team_list: list[str],\n capt_list: list[str],\n outfile: typing.Union[str, TextIOWrapper] = \"print\",\n):\n if outfile == \"print\":\n\n def pline(txt):\n print(txt)\n\n else:\n\n def pline(txt):\n outfile.write(txt + \"\\n\")\n\n line = \"\"\n\n pline(\"\\nTeam: \" + team + \"\\n\")\n for rnd in range(sch.nrounds):\n _rnd = sch.rounds[rnd]\n line = f\"{_rnd.play_date}\"\n game_not_found = True\n match = 0\n while game_not_found and match < _rnd.nmatches:\n _match = _rnd.matches[match]\n if _match.home == team:\n _teamidx = team_list.index(_match.away)\n _capt = capt_list[_teamidx]\n if \"Bye\" not in _match.away:\n line = line + f\" vs. {_match.away} ({_capt})\"\n else:\n line = line + \" --- BYE ---\"\n game_not_found = False\n elif _match.away == team:\n _teamidx = team_list.index(_match.home)\n _capt = capt_list[_teamidx]\n if \"Bye\" not in _match.home:\n line = line + f\" @ {_match.home} ({_capt})\"\n else:\n line = line + \" --- BYE ---\"\n game_not_found = False\n else:\n match = match + 1\n if game_not_found:\n logging.warning(\"Bye week is not expected.\")\n line = line + \"Bye Week\"\n pline(line)",
"def format_schedule(schedule: object) -> str:\n task_start = f'{schedule[\"job_type\"].lower()}'\n frequency_part = 'every'\n if schedule['frequency'] == 1:\n period = schedule[\"period\"].lower()\n period = period if period[-1] != 's' else period[:-1]\n frequency_part += f' {period}'\n else:\n frequency_part += f' {schedule[\"frequency\"]} {schedule[\"period\"].lower()}'\n time_part = f' at {schedule[\"time\"]}' if 'time' in schedule else ''\n return f'{task_start} {frequency_part}{time_part}'",
"def __str__(self):\n schedule = \"\"\n\n schedule_list = self.in_order_traversal()\n for node in schedule_list:\n schedule += str(node)\n if node is not schedule_list[-1]:\n schedule += \" \"\n return schedule",
"def output_schedule(self) -> None:\n with open(\"Output.txt\", \"w\") as out_file:\n for sem in self.plan:\n out_file.write(sem.title.center(15 + 20 + 50 + 5) + \"\\n\\n\")\n for course in sem.required_courses:\n if course.special:\n out_file.write(\"*\" * 10 + \" \" * 5 + f\"{course.special_type}\\n\")\n elif course.grade != \"\":\n out_file.write(\n course.sem_taken.ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + course.grade.ljust(5)\n + \"\\n\"\n )\n else:\n out_file.write(\n \"AP/UNK\".ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + \"AP/UNK\".ljust(5)\n + \"\\n\"\n )\n out_file.write(\"\\n\\n\")",
"def show( self, style = 1 ):\r\n if style == 1:\r\n print( \"INFO: print the schedule in style %d\" % style )\r\n elif style == 2:\r\n print( \"INFO: print the schedule in style %d\" % style )\r\n elif style == 3:\r\n print( \"INFO: print the schedule in style %d\" % style )\r\n else:\r\n print( \"WARN: invalid print schedule style %s\" % style )",
"def genScheduleCSV():\r\n try: \r\n printSchedule()\r\n save_class_list()\r\n print(\"\\nSchedule generated, check working directory\")\r\n except Exception as e:\r\n print(\"Exception found\" + str(e))",
"def human_firendly_print_repository_scheduled_tasks(scheduled):\n name_pad = 5\n for name in scheduled:\n if len(name) > name_pad:\n name_pad = len(name)\n name_pad += 1\n\n header = f'{\"Name\":<{name_pad}}| Task type | Next run'\n print('Scheduled tasks:')\n print(header)\n print('-' * (len(header) + 5))\n\n for task in scheduled.values():\n print(f'{task[\"name\"]:<{name_pad}}| {task[\"task_type\"].title():<10}| {task[\"next_run\"]}')",
"def schedule_paragraph():",
"def _print_live_ranges(self, schedule):\n range_starts = {}\n range_ends = {}\n\n for num, node, impl in schedule:\n for output in node.outputs:\n range_starts[output] = num\n for input in node.inputs:\n if input in range_starts:\n range_ends[input] = num\n\n print(\"Live Ranges:\")\n for name in sorted(range_starts.keys()):\n print(\"{:^5}\".format(name), end=\"\")\n print()\n\n for num, _, _ in schedule:\n for name in sorted(range_starts.keys()):\n if num < range_starts[name]:\n print(\" \", end=\"\")\n elif num == range_starts[name]:\n print(\" s \", end=\"\")\n elif num < range_ends[name]:\n print(\" | \", end=\"\")\n elif num == range_ends[name]:\n print(\" e \", end=\"\")\n else:\n print(\" \", end=\"\")\n print()",
"def __repr__(self):\n return \"Schedule(month = %s, week = %s, dayofweek = %s, dayofmonth = %s, hour = %s, minute = %s)\" % (repr(self.month), repr(self.week), repr(self.dayofweek), repr(self.dayofmonth), repr(self.hour), repr(self.minute))",
"def __repr__(self) -> str:\n # start the textual representation off with a newline\n weekly_schedule = \"\\n\"\n # iterate through each day of the week in the schedule\n for day in self.schedule.keys():\n # add the name of the current day of the week\n weekly_schedule += day + \":\\n\\n\\t\"\n # access the schedule for the current day of the week\n schedule_for_day = self.schedule[day]\n # add all of the classes of the current day of the week\n weekly_schedule += \"\\n\\t\".join(map(str, schedule_for_day))\n # for course in schedule_for_day:\n # weekly_schedule += str(course) + \"\\n\"\n # if not processing the last day of the week, add two newlines\n if day != list(self.schedule.keys())[-1]:\n weekly_schedule += \"\\n\\n\"\n # if processing the last day of the week, add one newline\n else:\n weekly_schedule += \"\\n\"\n # return a string that displays all courses taught in a week\n return weekly_schedule",
"def output_schedule_brief(cout, courses_to_schedule_d, courses_to_mt_d):\n cout.writerow([\"CourseCode\",\"DayWeek\",\"Start\",\"End\",\"Campus\"])\n\n # first write out the courses we just scheduled\n for cn in sorted(courses_to_schedule_d.keys()):\n meeting_time = courses_to_mt_d[cn]\n assert is_cross_list_canonical(cn)\n (subj, catalog) = sct.parse_canonical_course_name(cn)\n\n if print_area and subj != print_area:\n continue\n\n campus = \"Allston\" if will_be_allston_course_subj_catalog(subj, catalog) else \"Cambridge\"\n ct = ss.meeting_time_to_course_time(meeting_time)\n days = ct.days_of_week(separator='/')\n cout.writerow([cn, days, ct.time_start, ct.time_end, campus])\n\n # Now write out all the other courses\n for cn in sorted(sched_d.keys()):\n assert is_cross_list_canonical(cn)\n (subj, catalog) = sct.parse_canonical_course_name(cn)\n if print_area and subj != print_area:\n continue\n\n campus = \"Allston\" if will_be_allston_course_subj_catalog(subj, catalog) else \"Cambridge\"\n cts = sched_d[cn]\n for ct in cts:\n days = ct.days_of_week(separator='/')\n cout.writerow([cn, days, ct.time_start, ct.time_end, campus])",
"def get_schedule_string(self):\n schedule = \"\"\n for entry in self.entries:\n schedule += entry.get_entry_string()\n\n return schedule",
"def schedule_entry(self):\n\n return \" \".join(map(str, [self.schedule_time, self.schedule_type,\n self.schedule_scale_factor, self.schedule_prefix]))",
"def respond_schedule(schedule: dict) -> None:\n if schedule is None:\n response = communication.Response(\n success=False,\n message='Unable to read schedule.',\n command=communication.SCHEDULER_GET_SCHEDULE\n )\n else:\n response = communication.Response(\n success=True,\n message=schedule,\n command=communication.SCHEDULER_GET_SCHEDULE\n )\n communication.scheduler_queue_output.put(response)",
"def log_schedule(self):\n self.logger.log_schedule(self.params.schedule)",
"def schedule(self) -> pulumi.Input['ScheduleArgs']:\n return pulumi.get(self, \"schedule\")",
"def output_schedule_registrar(cout, schedule_d, courses_to_mt_d):\n\n schedule_score.output_course_schedule(cout, make_sched_d_from_solution(schedule_d, courses_to_mt_d))",
"def list_schedules(session, logger):\n for sched in session.query(Schedule).all():\n logger.info(\"- {}\".format(sched))",
"def display_schedule(schedule):\n\n def display_patches(patches_sequence, margin=8):\n \"\"\"\n Displays a sequence of MatPlotLib patches in a MatPlotLib window\n :param patches_sequence: the patches to display\n :param margin:\n :return:\n \"\"\"\n plt.rcdefaults()\n fig, ax = plt.subplots()\n for p in patches_sequence:\n ax.add_patch(p)\n max_machines = max(rect.get_y() for rect in patches_sequence) + 1\n max_jobs = max(rect.get_x() + margin for rect in patches_sequence)\n plt.axis([0, max_jobs, 0, max_machines])\n plt.show()\n\n patches = list()\n colors = [\"black\", \"darksalmon\", \"DarkKhaki\", \"DarkViolet\", \"red\", \"blue\", \"green\", \"cyan\", \"magenta\", \"yellow\",\n \"black\", \"IndianRed\", \"Pink\", \"Lavender\", \"DarkOrange\", \"GreenYellow\", \"Teal\", \"SteelBlue\",\n \"MidnightBlue\", \"Maroon\", \"DimGray\"]\n\n for i, prof in enumerate(schedule):\n prof = prof[\"Exams\"]\n for eleve, heure in prof.items():\n rekt = mpatches.Rectangle((heure, i), durations[i], 1, color=colors[eleve], ec=\"black\")\n patches.append(rekt)\n\n display_patches(patches)",
"def schedule(self) -> pulumi.Output['outputs.CanarySchedule']:\n return pulumi.get(self, \"schedule\")",
"def schedule(self) -> pulumi.Input['CanaryScheduleArgs']:\n return pulumi.get(self, \"schedule\")",
"def schedule(self, schedule):\n \n self._schedule = schedule"
]
| [
"0.8127595",
"0.7954059",
"0.73060447",
"0.7019486",
"0.68429357",
"0.6830832",
"0.67173177",
"0.66557765",
"0.6581177",
"0.6573826",
"0.6553859",
"0.64818215",
"0.63623947",
"0.63557285",
"0.6313227",
"0.62555295",
"0.6238557",
"0.6235394",
"0.6199181",
"0.6183564",
"0.61498785",
"0.6147396",
"0.6100106",
"0.6083734",
"0.60828775",
"0.60201883",
"0.5926859",
"0.5904907",
"0.58681494",
"0.58532435"
]
| 0.825305 | 0 |
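Note: the printSchedule document above depends on module-level state (the classes list and each object's scheduleString() method), so it cannot run on its own. A minimal, self-contained sketch of the same column-aligned printing idea follows; the sample entries and the field widths are illustrative assumptions, not taken from the original program.

def print_schedule(rows):
    # Center the title in a 45-character field, then print fixed-width columns.
    print("{0:^45}".format("Your Schedule:"))
    print("{0:<12}{1:<20}{2:<15}".format("Day", "Class", "Time"))
    if not rows:
        print("\nThere are no classes\n")
        return
    for day, name, time_range in rows:
        print("{0:<12}{1:<20}{2:<15}".format(day, name, time_range))

print_schedule([("Monday", "CS 150", "09:00-10:15"),
                ("Wednesday", "MATH 263", "13:10-14:25")])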
Method genScheduleCSV prints a formatted schedule to the Terminal and exports to a csv file | def genScheduleCSV():
try:
printSchedule()
save_class_list()
print("\nSchedule generated, check working directory")
except Exception as e:
        print("Exception found: " + str(e)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_class_list():\r\n try:\r\n classStringList.clear() #clear the classString List\r\n for i in range(0,len(classes)):\r\n classStringList.append(classes[i].csvRow()) #enter classes to the classStringList from the classes\r\n f = open(\"mySchedule.csv\", 'w', newline ='')\r\n csv.writer(f).writerow([\"Day\", \"Class\", \"Start Time\", \"End Time\"])\r\n for classCSVString in classStringList:\r\n csv.writer(f).writerow(classCSVString)\r\n f.close()\r\n except Exception as e:\r\n print(\"Exception found:\" + e)",
"def output_schedule(self) -> None:\n with open(\"Output.txt\", \"w\") as out_file:\n for sem in self.plan:\n out_file.write(sem.title.center(15 + 20 + 50 + 5) + \"\\n\\n\")\n for course in sem.required_courses:\n if course.special:\n out_file.write(\"*\" * 10 + \" \" * 5 + f\"{course.special_type}\\n\")\n elif course.grade != \"\":\n out_file.write(\n course.sem_taken.ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + course.grade.ljust(5)\n + \"\\n\"\n )\n else:\n out_file.write(\n \"AP/UNK\".ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + \"AP/UNK\".ljust(5)\n + \"\\n\"\n )\n out_file.write(\"\\n\\n\")",
"def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])",
"def output_schedule_brief(cout, courses_to_schedule_d, courses_to_mt_d):\n cout.writerow([\"CourseCode\",\"DayWeek\",\"Start\",\"End\",\"Campus\"])\n\n # first write out the courses we just scheduled\n for cn in sorted(courses_to_schedule_d.keys()):\n meeting_time = courses_to_mt_d[cn]\n assert is_cross_list_canonical(cn)\n (subj, catalog) = sct.parse_canonical_course_name(cn)\n\n if print_area and subj != print_area:\n continue\n\n campus = \"Allston\" if will_be_allston_course_subj_catalog(subj, catalog) else \"Cambridge\"\n ct = ss.meeting_time_to_course_time(meeting_time)\n days = ct.days_of_week(separator='/')\n cout.writerow([cn, days, ct.time_start, ct.time_end, campus])\n\n # Now write out all the other courses\n for cn in sorted(sched_d.keys()):\n assert is_cross_list_canonical(cn)\n (subj, catalog) = sct.parse_canonical_course_name(cn)\n if print_area and subj != print_area:\n continue\n\n campus = \"Allston\" if will_be_allston_course_subj_catalog(subj, catalog) else \"Cambridge\"\n cts = sched_d[cn]\n for ct in cts:\n days = ct.days_of_week(separator='/')\n cout.writerow([cn, days, ct.time_start, ct.time_end, campus])",
"def export_to_csv(self, log):\n if os.path.isfile(self.GENERATE_FILE):\n os.remove(self.GENERATE_FILE)\n\n with open(self.GENERATE_FILE, \"w\") as f:\n f.write(\"date, time, username, succes, label\\n\")\n\n for entry in log:\n f.write(str(entry[0].date()) + \", \"\n + str(self.hms_to_seconds(entry[0])) + \", \"\n + str(entry[1]) + \", \"\n + str(entry[2]) + \", \"\n + str(entry[3])\n + \"\\n\")",
"def print_csv():\n # read lines, and make the first a link\n show_played = request.args.get('showPlayed', 'true') == 'true'\n show_out_of_office = request.args.get('showOutOfOffice', 'true') == 'true'\n songs = database.load_songs(include_played=show_played, include_out_of_office=show_out_of_office)\n entries = [_convert_first_href(str(x)) for x in songs]\n header_line = \"YouTube Link,Played,Song Name,Added by\\n\"\n return \"%s%s\" % (header_line, \"\\n\".join(entries))",
"def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)",
"def writeCSV():\n final_list = get_final_list()\n path_to_csv_File = 'system_metrics.csv'\n\n csv_file = open(path_to_csv_File, 'w+', newline='', encoding=\"utf8\")\n csv_file_writer = csv.writer(csv_file, delimiter=',')\n\n csv_file_writer.writerow(['Subscription', 'Resource', 'MetricType',\n 'Timestamp', 'Unit', 'Minimum', 'Maximum', 'Average'])\n\n for item in final_list:\n csv_file_writer.writerow([item['subscription'], item['resource'], item['metricType'], item['timestamp'],\n item['unit'], item['minimum'], item['maximum'], item['average']])\n\n print('Output written successfully!!')",
"def printSchedule():\r\n print(\"{0:^45}\".format(\"Your Schedule:\\n\"))\r\n print(\" Day Class Time\")\r\n if(len(classes) == 0):\r\n print(\"\\nThere are no classes\\n\")\r\n return\r\n for class_ in classes:\r\n print(class_.scheduleString())\r\n print()",
"def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()",
"def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()",
"def pp_schedule(filename,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #timelist stands for timetable list\n #format of timelist: [[day,[start,end,module],...],...]\n timelist = []\n for k in range(1,8):\n #for each day code add day code\n timelist.append([k])\n #assign and make list for ttint\n #for all lines in file\n for l in range(len(incsv)):\n #if venue in line matches desired venue\n if incsv[l][0][7] == venue:\n #after each day code, add a list with start time, end time and module. Repeat for each relevant line\n timelist[(int(incsv[l][0][3])-1)].append([int(incsv[l][0][5]),int(incsv[l][0][6]),incsv[l][0][0]])\n #turtle\n print(\"Your timetable is being printed on Python Turtle Graphics. This may take a while.\")\n ttint(timelist,venue)",
"def export_csv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".csv\",\n filetypes=((\"comma seperated values\", \"*.csv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')",
"def outputFunc(filename, parks,roading,private):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n writer.writerow(days)\n writer.writerow(parks)\n writer.writerow(roading)\n writer.writerow(private)\n finally:\n f.close()",
"def csvOutput(cycle, fctimes, beachdata, offshoredata, surfdata, fname='isurf_output.csv', outdir='.'):\n\n datestr = cycle.strftime('%Y%m%d00')\n\n with open(outdir+'/%s' %fname,'w') as outp:\n outp.write(datestr+'\\r\\n')\n for isite in range(len(beachdata['name'])):\n outp.write('\\r\\n')\n outp.write('%s' %beachdata['name'][isite] + '\\r\\n')\n outp.write('%d' %beachdata['type'][isite] + '\\r\\n')\n #outp.write('TI Hsmo Tpmo Dmo Hseq Tpeq DmEq Hsbr Dpbr\\r\\n')\n #outp.write('LT,Wspd,Wdir,Hsmo,Tpmo,Dmo,Tide,Hseq,Tpeq,DmEq,Hsbr,Dpbr,Hlbr,Hhbr,BT\\r\\n')\n outp.write('LT,Wspd,Wdir,Hsmo,Tpmo,Dmo,Hseq,Tpeq,DmEq,Hsbr,Dpbr,Hlbr,Hhbr,BT\\r\\n')\n\n\t # write out to file\n for itime in range(len(fctimes)):\n\n # write out the data values to file\n\t #outp.write ('%02d' %fctimes[lp] + ' %4.2f %4.1f %3d' %tuple([hm0[lp,isite], tp[lp,isite], dirn[lp,isite]]) + \\\n # ' %4.2f %4.1f %3d' %tuple([hsshwd[lp,isite], tpshwd[lp,isite], reldir[lp,isite]]) + ' %4.2f %4.2f' %tuple([hsbkinit[lp,isite], dpsat[lp,isite]]) + '\\r\\n')\n\t outp.write('%02d' %fctimes[itime] + \\\n ',%4.1f' %offshoredata['wspd'][itime,isite] + \\\n #',%3d' %offshoredata['wdir'][itime,isite] + \\\n ',%4.2f' %offshoredata['hm0'][itime,isite] + \\\n ',%4.1f' %offshoredata['tp'][itime,isite] + \\\n ',%3d' %offshoredata['dirn'][itime,isite] + \\\n ',%4.2f' %surfdata['shorewardHs'][itime,isite] + \\\n ',%4.1f' %surfdata['shorewardT'][itime,isite] + \\\n ',%3d' %surfdata['relativeDirn'][itime,isite] + \\\n ',%4.2f' %surfdata['breakerHs'][itime,isite] + \\\n ',%4.2f' %surfdata['saturatedDepth'][itime,isite] + \\\n ',%4.2f' %surfdata['Hb1in3'][itime,isite] + \\\n ',%4.2f' %surfdata['Hb1in10'][itime,isite] + \\\n ',%1d' %surfdata['breakerType'][itime,isite] + '\\r\\n')\n outp.close()",
"def create_csv(self):\n try:\n # Convert List of Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise",
"def export_csv(user, tasks):\n employee_name = user[0]['name']\n employee_id = user[0]['id']\n csvfile = '{}.csv'.format(employee_id)\n with open(csvfile, mode='w') as file:\n towrite = csv.writer(file, delimiter=',', quoting=csv.QUOTE_ALL)\n for task in tasks:\n towrite.writerow([employee_id, employee_name,\n task['completed'], task['title']])",
"def output_into_file(self, path: str):\n # Creating path if not exist\n Path(path).mkdir(parents=True, exist_ok=True)\n # Writing every day as a csv file\n for day in self:\n with open(f\"{path}/{day.name}.csv\", \"w\") as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # First line / Title\n writer.writerow([\" \", day.name])\n for shift in day:\n employees = \", \".join([e.name for e in shift.employees])\n writer.writerow([f\"{shift.start}-{shift.end}\", employees])",
"def field_to_csv_AdSchedule(entity, id):\n if entity is None:\n return None\n if entity.DayTimeRanges is None:\n return DELETE_VALUE if id and id > 0 else None\n return ';'.join('({0}[{1:02d}:{2:02d}-{3:02d}:{4:02d}])'\n .format(d.Day, d.StartHour, int(minute_bulk_str(d.StartMinute)), d.EndHour, int(minute_bulk_str(d.EndMinute)))\n for d in entity.DayTimeRanges.DayTime\n )",
"def _csvWriter(self):\r\n # Initialize Header\r\n table = []\r\n voltageRow = []\r\n for i in range(len(self._voltages)):\r\n voltageRow.append(self._voltages[i][0])\r\n voltageRow.append(\" \")\r\n if self._vna.isTwoComponents():\r\n voltageRow.append(\" \")\r\n table.append(voltageRow)\r\n \r\n # Fill table with data\r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._frequency[0])):\r\n # row = []\r\n # for j in range(len(self._frequency)):\r\n # row.append(self._frequency[j][i])\r\n # row.append(self._intensity[j][2*i])\r\n # row.append(self._intensity[j][2*i + 1])\r\n # table.append(row)\r\n # else: \r\n for i in range(len(self._frequency[0])):\r\n row = []\r\n for j in range(len(self._frequency)):\r\n row.append(self._frequency[j][i])\r\n row.append(self._intensity[j][i])\r\n table.append(row)\r\n\r\n # Write to CSV\r\n filename = 'CSVs/' + self._vna.getDateFormatted() + '.csv'\r\n with open(filename, 'w', newline='') as csvfile:\r\n dataWriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\r\n for i in range(len(table)):\r\n dataWriter.writerow(table[i])",
"def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]",
"def generateCSV(self, objs=None, fields=None, delimiter='semicolon',\n quote_char='double_quote', coding=None,\n export_type='Registrant'):\n\n #container = self.unrestrictedTraverse(\n # self.REQUEST.get('current_path'))\n if objs is None:\n objs = self.get_registrants()\n\n delim_map = {\n 'tabulator': '\\t',\n 'semicolon': ';',\n 'colon': ':',\n 'comma': ',',\n 'space': ' ',\n }\n\n delimiter = delim_map[delimiter]\n quote_map = {'double_quote': '\"', 'single_quote': \"'\", }\n quote_char = quote_map[quote_char]\n\n # generate result\n if fields is None:\n result = ''\n else:\n rows = [fields]\n pwf = self.context.portal_workflow\n for obj in objs:\n row = []\n #code to append creationDate since it is not part of the fields list\n row.append(obj.CreationDate())\n row.append(obj.getId())\n for fieldname in fields:\n if fieldname.find('.') != -1:\n fieldname, key = fieldname.split('.')\n try:\n field = obj.Schema()[fieldname]\n value = field.getAccessor(obj)()\n row.append(value)\n except KeyError:\n row.append('')\n row.append(self.translate(pwf.getInfoFor(obj, 'review_state')).encode('utf-8'))\n rows.append(row)\n rows[0].insert(0, 'id')\n rows[0].insert(0, 'date')\n rows[0].append('review_state')\n # convert lists to csv string\n ramdisk = StringIO()\n writer = csv.writer(ramdisk, delimiter=delimiter)\n writer.writerows(rows)\n result = ramdisk.getvalue()\n ramdisk.close()\n\n return result",
"def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"",
"def create_csv_runtime(analysis, paths, csv_parameters):\n\n run_times = analysis.get_runtimes(paths)\n runs = max(run_times.keys()) + 1\n\n output_csv_runtime = csv_parameters\n runtimes = ['Runtimes']\n for run in range(runs):\n runtimes.append(run_times[run])\n output_csv_runtime.append(runtimes)\n\n return output_csv_runtime",
"def generate_csv(type, json_list, columns_list):\n with open(\"data/\" + type + \"_\" + time.strftime(\"%Y-%m-%d_%H:%M:%S\") +\n \".csv\", 'a+') as f:\n csv_file = csv.DictWriter(f, fieldnames=columns_list,\n extrasaction=\"ignore\")\n csv_file.writeheader()\n for item in json_list:\n csv_file.writerow(item)\n print(\"\\nCSV file saved as data/\" + type + \"_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".csv\")",
"def generate_csv_table(table_values):\n\n with open('ayasdi_assignment.csv', 'wb') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',')\n filewriter.writerows(table_values)",
"def print_list(schedule):\n START_TIME = 0\n END_TIME = 1\n MEETING_TITLE = 2\n print(\"\\nYour schedule for the day:\")\n if len(schedule) == 0:\n print(\"(empty)\\n\")\n else:\n for row in schedule:\n print(\n f\"{row[START_TIME]} - {row[END_TIME]} {row[MEETING_TITLE]}\")\n print(\"\\n\")",
"def to_csv(header, rows):\r\n with open('result.csv', 'w') as result:\r\n result_writer = csv.writer(result, delimiter=';')\r\n result_writer.writerow(header)\r\n result_writer.writerows(rows)",
"def generate_csv(inf, outf):\n o = csv.writer(outf)\n o.writerow(COLUMNS)\n for row in reformat_data(inf):\n o.writerow([inf.name] + row)",
"def field_to_csv_FeedItemAdSchedule(entity, id):\n if entity is None:\n return None\n if entity.daytime_ranges is None:\n return DELETE_VALUE if id and id > 0 else None\n return ';'.join('({0}[{1:02d}:{2:02d}-{3:02d}:{4:02d}])'\n .format(d.Day, d.StartHour, int(minute_bulk_str(d.StartMinute)), d.EndHour, int(minute_bulk_str(d.EndMinute)))\n for d in entity.daytime_ranges\n )"
]
| [
"0.63309157",
"0.63027954",
"0.6272687",
"0.6155261",
"0.61245793",
"0.6047776",
"0.60094565",
"0.59290546",
"0.59159285",
"0.5870327",
"0.5846131",
"0.58076143",
"0.5802007",
"0.5800648",
"0.578713",
"0.5776152",
"0.5773833",
"0.5773768",
"0.5768415",
"0.5747529",
"0.57474315",
"0.5745204",
"0.5731817",
"0.5728752",
"0.5721064",
"0.57094437",
"0.5704358",
"0.569956",
"0.5691341",
"0.56704026"
]
| 0.85996705 | 0 |
Method load_class_list reads class information from a csv file and adds classes to the classes list and the classStringList | def load_class_list():
try:
firstLine = True #keeping track of the first line in the csv file (the header)
index = 0
if os.access("mySchedule.csv", os.F_OK): #If the file exists
f = open("mySchedule.csv")
for row in csv.reader(f):
if firstLine:
firstLine = False
continue #skip first line
classStringList.insert(index, row) #load file to classString list and to classes list
classes.insert(index, Class(row[1], Day(row[0]), formatFromCSV(row[2]), formatFromCSV(row[3])))
index += 1
f.close()
except Exception as e:
        print("Exception found: " + str(e)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_class_list(self, module, classes):",
"def load_from_file_csv(cls):\n if path.exists(cls.__name__ + \".csv\") is False:\n return []\n with open(cls.__name__ + \".csv\", \"r\", newline='') as f:\n listofinstances = []\n reader = csv.DictReader(f)\n for row in reader:\n for key, value in row.items():\n row[key] = int(value)\n listofinstances.append(cls.create(**row))\n return listofinstances",
"def load_from_file_csv(cls):\n list_obj = []\n if os.path.exists(cls.__name__ + \".csv\"):\n with open(cls.__name__ + \".csv\", \"r\") as _file:\n str_csv = _file.read()\n _file.close()\n _dict = Base.from_json_string(str_csv)\n for obj in _dict:\n list_obj.append(cls.create(**obj))\n return(list_obj)",
"def set_class_list(self, L):\n\t\tself.class_list = L",
"def load_from_file_csv(cls):\n fields = []\n rows = []\n new_dict = {}\n new_list = []\n key = \"\"\n filename = cls.__name__ + \".csv\"\n with open(filename) as fp:\n reader = csv.reader(fp)\n fields = next(reader)\n for row in reader:\n rows.append(row)\n for row in rows:\n i = 0\n new_dict = new_dict.fromkeys(fields)\n for attr in fields:\n key = fields[i]\n value = row[i]\n new_dict[key] = value\n i += 1\n new_list.append(cls.create(**new_dict))\n return new_list",
"def parseClasses(file_name):\n\tlines = file(file_name).read().strip().split('\\n')\n\tlines = [x.strip() for x in lines if len(x.strip()) > 0]\n\tclasses = []\n\tfor l in lines:\n\t\tclasses = classes + [clean(x) for x in l.split(',')]\n\treturn classes",
"def load_from_file_csv(cls):\n new_list = []\n try:\n with open(\"%s.csv\" % cls.__name__, mode='r') as f:\n file = cls.from_json_string(f.read())\n for i in file:\n new_list.append(cls.create(**i))\n except Exception:\n pass\n return new_list",
"def class_names_from_csv(class_map_csv_text):\n class_names = []\n with tf.io.gfile.GFile(class_map_csv_text) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n class_names.append(row['display_name'])\n\n return class_names",
"def load_classes(path):\n with open(path, 'r') as f:\n names = f.read().split(\"\\n\")\n # Filter removes empty strings (such as last line)\n return list(filter(None, names))",
"def getListClasses():\n\n f=open('csv_files/class-descriptions-boxable.csv',\"r\",encoding='utf8') # file matching the name of the class and its identifier (ex: /m/011k07,Tortoise )\n for ligne in f:\n a=0\n while a < 600:\n ligne = f.readline() # Analysis of the file line by line\n mots = ligne.split(\",\")\n name_classe = (mots[1])\n print(name_classe)\n a+=1\n #reading the file line by line and displaying the name of each class",
"def populate_list(self, input_filename):\r\n f = open(input_filename, 'r')\r\n\r\n for line in f:\r\n # Process the input line\r\n line_split = line.strip().split('(')\r\n line_split[-1] = line_split[-1][:-1] # Removes the extra bracket at the end\r\n\r\n class_name = line_split[0]\r\n parameters = line_split[1].split(',')\r\n self.metrics.append(self.instantiate_class(class_name, *parameters))\r\n\r\n f.close()",
"def read_class_list(self, path):\n # file_path = path\n # im_names = []\n # for root, dirs, files in os.walk(file_path, topdown=False):\n # for name in files:\n # if os.path.splitext(os.path.join(root, name))[1].lower() == \".jpeg\":\n # # if name.split('.')[0].split('-')[-1].lower()=='5x':\n # im_names.append(os.path.join(root, name))\n\n input_file = os.path.join(path)\n # with open(input_file, 'r') as f:\n # data = f.read()\n # df = pd.read_csv(input_file, sep='\\t')\n df = pd.read_csv(input_file)#, lineterminator='\\n')\n #df = df[1:2000]\n # df = df.loc[(df['text_human'].isin(['affected_individuals','infrastructure_and_utility_damage',\n # 'injured_or_dead_people','rescue_volunteering_or_donation_effort'])) | \n # (df['text_info'].isin(['not_informative']))]\n # df = df.loc[df[' Informativeness'].isin(['Related and informative','Related - but not informative'])]\n\n # x_raw = df['text'].apply(lambda x: self.clean_str(x)).tolist()\n # x_raw = df[' tweet'].apply(lambda x: p.tokenize(x)).tolist()\n self.images = df['image_id'].apply(lambda x: self.x_path(x)).tolist()\n self.labels = df['dx'].apply(lambda x: self.y_value(x)).tolist()\n\n x_img_raw_array = np.array(self.images)\n y_raw_array = np.array(self.labels)\n\n sss = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=1)\n sss.get_n_splits(x_img_raw_array, y_raw_array)\n for train_idx, test_idx in sss.split(x_img_raw_array, y_raw_array):\n x_img_train_array = x_img_raw_array[train_idx]\n x_img_test_array = x_img_raw_array[test_idx]\n\n y_train = y_raw_array[train_idx]\n y_test = y_raw_array[test_idx]\n\n #train_imgs, test_imgs, train_labels, test_labels = train_test_split(x_raw_array, y_raw_array, test_size=0.3, random_state=1)\n\n self.train_images = list(x_img_train_array)\n self.train_labels = list(y_train)\n\n self.test_images = list(x_img_test_array)\n self.test_labels = list(y_test)\n\n #im_names = next(walk(path))[2]\n #num_files = len(im_names)\n # self.images = []\n # self.labels = []\n # #filenames=filenames[2:]\n # for i, filename in enumerate(im_names):\n # label = filename.split(\"/\")[2]\n # if label != 'unlabelled':\n # #self.images.append(join(path, filename))\n # if label == 'trainA':\n # self.images.append(join(filename))\n # self.labels.append(1)\n # elif label == 'trainB':\n # self.images.append(join(filename))\n # self.labels.append(0)\n self.train_data_size = len(self.train_labels)\n self.test_data_size = len(self.test_labels)",
"def load_from_file_csv(cls):\n try:\n with open(cls.__name__ + \".csv\", \"r\") as f:\n ld = []\n reader = csv.DictReader(f)\n for row in reader:\n for key, val in row.items():\n row[key] = int(val)\n ld.append(row)\n return [cls.create(**item) for item in ld]\n except FileNotFoundError:\n return []",
"def load_from_file_csv(cls):\n list_rectangle = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n list_square = [\"id\", \"size\", \"x\", \"y\"]\n filename = cls.__name__ + \".csv\"\n dictionary = []\n result = []\n\n try:\n with open(filename, encoding=\"utf-8\") as file:\n obj_list = csv.reader(file)\n # read obj_list <_csv.reader object at 0x7fbfe5614b38>\n if cls.__name__ == \"Rectangle\":\n for list in obj_list:\n # create dictionary\n dict = {}\n for key, value in zip(list_rectangle, list):\n dict[key] = int(value)\n # create an object and append to a list\n result.append(cls.create(**dict))\n if cls.__name__ == \"Square\":\n for list in obj_list:\n # create dictionary\n dict = {}\n for key, value in zip(list_square, list):\n dict[key] = int(value)\n # create an object and append to a list\n result.append(cls.create(**dict))\n return result\n except:\n return result",
"def save_class_list():\r\n try:\r\n classStringList.clear() #clear the classString List\r\n for i in range(0,len(classes)):\r\n classStringList.append(classes[i].csvRow()) #enter classes to the classStringList from the classes\r\n f = open(\"mySchedule.csv\", 'w', newline ='')\r\n csv.writer(f).writerow([\"Day\", \"Class\", \"Start Time\", \"End Time\"])\r\n for classCSVString in classStringList:\r\n csv.writer(f).writerow(classCSVString)\r\n f.close()\r\n except Exception as e:\r\n print(\"Exception found:\" + e)",
"def load_events_classes(fhandle: TextIO) -> list:\n reader = csv.reader(fhandle, delimiter=\",\")\n headers = next(reader)\n class_ids = headers[3:]\n return class_ids",
"def load_classes():\n \tfnm = \"../datasets/bbc/bbc.classes\"\n \tconverters = { \"docid\": toInt, \"docid\":toInt}\n \tX = pandas.read_table(fnm, header=None, sep=\" \", skiprows=4, comment=\"%\", names= [\"docid\", \"classid\"], converters=converters)\n \treturn X",
"def load_classes(path):\n fp = open(path, \"r\")\n names = fp.read().split(\"\\n\")[:-1]\n # -1까지 하는 이유 마지막에 공백이 있다.\n print(\"Load Class Nums : \",len(names))\n return names",
"def GetClassesFromFile(self,file_path):\n classes = []\n try:\n fl = open(file_path,\"r\")\n for line in fl.readlines():\n if \"class\" in line and \":\" in line:\n line = line.strip(\"class \")\n line2 = \"\"\n for i in line:\n if i!=\":\": line2+=i\n\n classes.append(line2)\n if classes:\n return classes\n else:\n return False\n fl.close()\n except:\n return False",
"def _load_classes(self):\n\t\t# load class names (name -> label)\n\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\tself.classes \t\t\t\t= {}\n\t\tself.coco_labels \t\t\t= {}\n\t\tself.coco_labels_inverse \t= {}\n\t\tfor c in categories:\n\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\tself.classes[c['name']] = len(self.classes)\n\t\tself.labels = {}\n\t\tfor key, value in self.classes.items():\n\t\t\tself.labels[value] = key\n\n\t\tprint(self.coco_labels)\n\t\tprint(self.coco_labels_inverse)\n\t\tprint(self.classes)\n\t\tprint(self.labels)",
"def load_csv(data_file_path, class_index=-1):\n\n handle = open(data_file_path, 'r')\n contents = handle.read()\n handle.close()\n rows = contents.split('\\n')\n out = np.array([[float(i) for i in r.split(',')] for r in rows if r])\n\n if class_index == -1:\n classes = map(int, out[:, class_index])\n features = out[:, :class_index]\n return features, classes\n\n elif class_index == 0:\n classes = map(int, out[:, class_index])\n features = out[:, 1:]\n return features, classes\n\n else:\n return out",
"def _load_classes(self):\n classdocs = self._docset.get_classes()\n for classdoc in classdocs:\n files = [self._docmap[filedoc] for filedoc in classdoc.get_files()]\n classobj = Class(classdoc, files)\n self._docmap[classdoc] = classobj\n self._classes.add(classobj)",
"def read_csv_vidor(csv_file, class_whitelist=None, load_score=False):\n boxes = defaultdict(list)\n labels = defaultdict(list)\n scores = defaultdict(list)\n with PathManager.open(csv_file, \"r\") as f:\n reader = csv.reader(f)\n\n import pdb; pdb.set_trace()\n for row in reader:\n assert len(row) in [7, 8], \"Wrong number of columns: \" + row\n image_key = make_image_key(row[0], row[1])\n x1, y1, x2, y2 = [float(n) for n in row[2:6]]\n action_id = int(row[6])\n if class_whitelist and action_id not in class_whitelist:\n continue\n score = 1.0\n if load_score:\n score = float(row[7])\n boxes[image_key].append([y1, x1, y2, x2])\n labels[image_key].append(action_id)\n scores[image_key].append(score)\n return boxes, labels, scores",
"def read_classes(file, class_list):\n\n if 'PSB' not in file.readline().strip():\n raise ('Not a valid PSB classification header', ImportError)\n\n _, num_models = file.readline().strip().split()\n modelcount = 0\n class_dict = {}\n\n while modelcount < int(num_models):\n line = file.readline().strip().split()\n if len(line) == 0:\n pass \n elif len(line) > 2 and line[2] == '0': # empty class label\n pass\n elif len(line) > 2:\n class_name = str(line[0])\n # if the class not in the class_list add it\n if class_name not in class_list:\n class_list.append(class_name)\n else: # add the class to the number of the model\n class_id = class_list.index(class_name) # give class id based on class_list index\n class_dict[line[0]] = (class_id, class_name)\n modelcount += 1\n\n return class_dict, class_list",
"def read_classes_from_file(self, class_file):\n items = []\n with open(class_file) as f:\n for cl in f.readlines():\n # c:code, d:description\n item = [{'value': c, 'text': f'{c}: ' + d.replace('\\n','')} for c, d in [cl.split(',')]]\n items+=item\n \n return items",
"def load_classes(self):\n\t\t\t# Load class names (name -> label).\n\t\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\t\tself.classes = {}\n\t\t\tself.coco_labels = {}\n\t\t\tself.coco_labels_inverse = {}\n\t\t\tfor c in categories:\n\t\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\t\tself.classes[c['name']] = len(self.classes)\n\n\t\t\t# Also load the reverse (label -> name).\n\t\t\tself.labels = {}\n\t\t\tfor key, value in self.classes.items():\n\t\t\t\tself.labels[value] = key",
"def getIdClass():\n name = parser_arguments().classes #list format\n classe = \"-\".join(name) #string format\n f=open('csv_files/class-descriptions-boxable.csv',\"r\",encoding='utf8')\n for l in f:\n a=0\n while a < 600:\n l = f.readline()\n mots = l.split(\",\")\n name = mots[1]\n name = name.replace(\" \",\"\")\n\n if classe in name:\n id_classe = mots[0]\n return id_classe\n # line by line of the file, we check if the name of the class passed in parameter is present.\n # if yes, we return the identifier of this class\n # if not, we test the file's following line\n else:\n a = a+1",
"def get_class_names(dataset = ADE20K, csv_path = BASE_DATASET_DESCRIPTION_PATH):\r\n # Check that the information of this dataset is available\r\n if not os.path.isfile(os.path.join(csv_path, dataset+'.csv')):\r\n raise RuntimeError(\"Class names of {df} are not available.\\n Unique available datasets are: {aval}\"\r\n .format(df=dataset, aval=os.listdir(csv_path)))\r\n # Process the csv\r\n info_path = os.path.join(csv_path, dataset + '.csv')\r\n info = np.genfromtxt(fname=info_path, dtype='<U128',skip_header=True,delimiter=',')\r\n info = info[..., -1]\r\n classes = [classes.split(MULTIPLE_CLASSES_SEPARATOR)[0] for classes in info]\r\n # Return the class names formatted as numpy for faster association.\r\n return np.array(classes, dtype='<U128')",
"def loadCSVSeeds(self, csvFilePath):\n labels = []\n with open(csvFilePath) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in reader:\n labels.append([row[0], row[1], [float(row[2]), float(row[3]), float(row[4]) ]])\n print(csvFilePath + \": labels loaded\")\n return labels",
"def _read_classes(csv_reader):\n result = {}\n for line, row in enumerate(csv_reader):\n line += 1\n\n try:\n class_name, class_id = row\n except ValueError:\n raise_from(ValueError('line {}: format should be \\'class_name,class_id\\''.format(line)), None)\n class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))\n\n if class_name in result:\n raise ValueError('line {}: duplicate class name: \\'{}\\''.format(line, class_name))\n result[class_name] = class_id\n return result"
]
| [
"0.66627824",
"0.64263815",
"0.6320585",
"0.63066614",
"0.6295501",
"0.6259383",
"0.62000376",
"0.61884654",
"0.6180721",
"0.60922486",
"0.6062719",
"0.60619515",
"0.60037845",
"0.59632725",
"0.5914574",
"0.58258075",
"0.5812363",
"0.57939637",
"0.5776928",
"0.5750722",
"0.5745595",
"0.5694624",
"0.56920594",
"0.5682146",
"0.56724155",
"0.56536967",
"0.56393844",
"0.56099993",
"0.55720526",
"0.55469346"
]
| 0.8204866 | 0 |
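Note: load_class_list above relies on the program's Class and Day types and its formatFromCSV helper, none of which appear in this record. A self-contained sketch of the same pattern — open the CSV only if it exists, skip the header row, and collect the remaining rows — is given below; the file name matches the record, everything else is an assumption.

import csv
import os

def load_rows(path="mySchedule.csv"):
    # Return the data rows of the schedule file, skipping the
    # "Day,Class,Start Time,End Time" header written by save_class_list.
    rows = []
    if os.access(path, os.F_OK):
        with open(path, newline="") as f:
            reader = csv.reader(f)
            next(reader, None)  # skip the header line instead of tracking a firstLine flag
            for row in reader:
                rows.append(row)
    return rows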
Method save_class_list exports current information in the classes list to a csv | def save_class_list():
try:
classStringList.clear() #clear the classString List
for i in range(0,len(classes)):
classStringList.append(classes[i].csvRow()) #enter classes to the classStringList from the classes
f = open("mySchedule.csv", 'w', newline ='')
csv.writer(f).writerow(["Day", "Class", "Start Time", "End Time"])
for classCSVString in classStringList:
csv.writer(f).writerow(classCSVString)
f.close()
except Exception as e:
        print("Exception found: " + str(e)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_to_file_csv(cls, list_objs):\n with open(cls.__name__ + \".csv\", \"w\", newline='') as f:\n if cls.__name__ == \"Rectangle\":\n fieldnames = ['id', 'width', 'height', 'x', 'y']\n elif cls.__name__ == \"Square\":\n fieldnames = ['id', 'size', 'x', 'y']\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n if list_objs is not None:\n for model in list_objs:\n writer.writerow(model.to_dictionary())",
"def save_to_file_csv(cls, list_objs):\n list_rectangle = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n list_square = [\"id\", \"size\", \"x\", \"y\"]\n filename = cls.__name__ + \".csv\"\n result = []\n\n if list_objs:\n for objs in list_objs:\n # First recollect the info of the object with a dict\n dictionary = objs.to_dictionary()\n middle_result = []\n # Second obtein the values in a ordered class list\n if cls.__name__ == \"Rectangle\":\n for item in list_rectangle:\n middle_result.append(dictionary[item])\n if cls.__name__ == \"Square\":\n for item in list_square:\n middle_result.append(dictionary[item])\n # append the list to result list\n result.append(middle_result)\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n writer = csv.writer(file)\n writer.writerows(result)",
"def save_to_file_csv(cls, list_objs):\n ld = []\n with open(cls.__name__ + \".csv\", \"w\", encoding=\"utf-8\") as f:\n if list_objs:\n for obj in list_objs:\n if cls.__name__ == 'Rectangle':\n ld.append([\n obj.id, obj.width, obj.height, obj.x, obj.y])\n if cls.__name__ == 'Square':\n ld.append([obj.id, obj.size, obj.x, obj.y])\n writer = csv.writer(f)\n for row in ld:\n writer.writerow(row)",
"def save_to_file_csv(cls, list_objs):\n r_fields = ['id', 'width', 'height', 'x', 'y']\n s_fields = ['id', 'size', 'x', 'y']\n filename = cls.__name__ + \".csv\"\n new_list = []\n with open(filename, \"w\") as fp:\n if cls.__name__ == \"Rectangle\":\n dict_writer = csv.DictWriter(fp, fieldnames=r_fields)\n elif cls.__name__ == \"Square\":\n dict_writer = csv.DictWriter(fp, fieldnames=s_fields)\n dict_writer.writeheader()\n for objs in list_objs:\n dict_writer.writerow(objs.to_dictionary())",
"def save_to_file_csv(cls, list_objs):\n l = []\n if list_objs is not None:\n for item in list_objs:\n l.append(item.to_dictionary())\n with open(\"%s.csv\" % cls.__name__, mode='w') as f:\n f.write(Base.to_json_string(l))",
"def save_to_file_csv(cls, list_objs):\n f_name = cls.__name__ + \".csv\"\n with open(f_name, 'w', newline='') as f:\n if list_objs is None or list_objs == []:\n f.write(\"[]\")\n\n else:\n if cls.__name__ == 'Rectangle':\n h = ['id', 'width', 'height', 'x', 'y']\n else:\n h = ['id', 'size', 'x', 'y']\n ncsv = csv.DictWriter(f, fieldnames=h)\n for obj in list_objs:\n ncsv.writerow(obj.to_dictionary())",
"def save_to_file_csv(cls, list_objs):\n list_dictionaries = []\n if list_objs is None or list_objs == []:\n string_dictionary = \"[]\"\n else:\n for _obj_dict in list_objs:\n list_dictionaries.append(_obj_dict.to_dictionary())\n string_dictionary = Base.to_json_string(list_dictionaries)\n with open(cls.__name__ + \".csv\", \"w\") as _file:\n _file.write(string_dictionary)\n _file.close()",
"def save_class_representation(self):\n class_dict = {}\n for key, value in self.class_dict.items():\n class_dict['-'.join(key)] = list(value)\n with open('data/class_vectors.txt', 'w') as file:\n json.dump(class_dict, file)\n return class_dict",
"def save_class(self, a, class_name):\n logging.debug(\"in save class \" + class_name)\n self.produce(\"class_name\", class_name)\n self.classes.append(class_name)\n self.begin('')",
"def process_class_list(self, module, classes):",
"def store_classes_and_predictions(output_file_path, classes, predictions):\n with open(output_file_path, mode='a', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['true', 'predicted'])\n for i in range(len(classes)):\n csvwriter.writerow([classes.iloc[i], predictions.iloc[i]])",
"def save(self):\n\t\tprint('bSlabList.save() not implemented')\n\n\t\t# headers are keys of xxxx\n\n\t\t# each element in xxx is a comma seperated row",
"def save_to_file(cls, list_objs):\n filename = cls.__name__\n dictt = []\n if list_objs:\n for i in list_objs:\n dictt.append(cls.to_dictionary(i))\n\n with open(filename + \".json\", \"w\") as myfile:\n myfile.write(cls.to_json_string(dictt))",
"def save_entries(self):\n with open(self.file_name, \"w\") as file:\n file.write('date,name,minutes,note\\n')\n for entry in self.entries:\n writer = csv.writer(file)\n writer.writerow([entry.date, entry.name, entry.minutes, entry.note])",
"def class_to_db(self):",
"def save_csv(filename, save_list):\n with open(filename, mode='w') as csv:\n csv.writelines([','.join(item) + '\\n' for item in save_list])",
"def save_to_file(cls, list_objs):\n filename = cls.__name__ + \".json\"\n new_list = []\n with open(filename, \"w\") as fp:\n if list_objs is None:\n fp.write(\"[]\")\n else:\n for objs in list_objs:\n new_list.append(cls.to_dictionary(objs))\n fp.write(cls.to_json_string(new_list))",
"def genScheduleCSV():\r\n try: \r\n printSchedule()\r\n save_class_list()\r\n print(\"\\nSchedule generated, check working directory\")\r\n except Exception as e:\r\n print(\"Exception found\" + str(e))",
"def save_to_csv(list_return, name, fieldnames):\n os.makedirs(os.path.dirname(name + '.csv'), exist_ok=True)\n with open(name + '.csv', 'w') as csvfile:\n csvfile.write(','.join(map(str, field_names)))\n csvfile.write('\\n')\n write = csv.writer(csvfile, delimiter=',')\n for x in range(0, len(list_return)):\n write.writerow(list_return[x])",
"def export_cropobject_class_list(cropobject_classes):\n # type: (List[CropObjectClass]) -> str\n cropobject_classes_string = '\\n'.join([str(c) for c in cropobject_classes])\n\n lines = list()\n\n lines.append('<?xml version=\"1.0\" encoding=\"utf-8\"?>')\n lines.append('<CropObjectClassList'\n ' noNamespaceSchema=\"mff-muscima-cropobject-classes.xsd\"'\n ' xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"'\n ' xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\">')\n lines.append('<CropObjectClasses>')\n lines.append(cropobject_classes_string)\n lines.append('</CropObjectClasses>')\n lines.append('</CropObjectClassList>')\n return '\\n'.join(lines)",
"def save_file(self):\n # paginate over deputies and senators getting their fields\n fieldnames = set([])\n congressmen = self.deputies + self.senators\n for data in congressmen:\n fieldnames = fieldnames.union(data.dump().keys())\n\n\n with open(IDENTITY_FILE_UPDATED, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=list(fieldnames), delimiter=';')\n writer.writeheader()\n\n for data in congressmen:\n writer.writerow(data.dump())",
"def persist_list_to_csv(liste, nom_fichier):\n with open(nom_fichier, 'w') as f:\n for elem in liste :\n f.write(\"{}\\n\".format(elem))",
"def do_save(self):\n id_client = json.loads(request.data.decode('UTF-7'))['id']\n g_list_of_classifier.save_in_file(CLASSIFIER_PATH + str(id_client) + \n '.cls')\n return ''",
"def save_csv(outputfile):\n with open(outputfile, 'w', newline='') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(DATA_KEYS)\n\n # Add data to csv-file\n for data in data_list:\n writer.writerow(data)",
"def save_to_file(cls, list_objs):\n objs = []\n filename = cls.__name__ + \".json\"\n if list_objs is not None:\n for ob in list_objs:\n objs.append(cls.to_dictionary(ob))\n with open(filename, \"w\") as f:\n f.write(cls.to_json_string(objs))",
"def save_to_file(cls, list_objs):\n my_list = []\n if list_objs or list_objs is not None:\n my_list = [obj.to_dictionary() for obj in list_objs]\n with open(cls.__name__ + '.json', 'w+') as f:\n f.write(cls.to_json_string(my_list))",
"def save_to_file(cls, list_objs):\n d = []\n with open(cls.__name__ + \".json\", \"w\", encoding=\"utf-8\") as f:\n if list_objs:\n for obj in list_objs:\n d.append(obj.to_dictionary())\n f.write(cls.to_json_string(d))",
"def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)",
"def save_to_file(cls, list_objs):\n filename = cls.__name__ + \".json\"\n result = []\n if list_objs:\n for objs in list_objs:\n dictionary = objs.to_dictionary()\n result.append(dictionary)\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n file.write(cls.to_json_string(result))",
"def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')"
]
| [
"0.6824156",
"0.679497",
"0.66754794",
"0.6620248",
"0.6603461",
"0.6543441",
"0.6356623",
"0.62391156",
"0.6098659",
"0.6059709",
"0.60516226",
"0.60439676",
"0.6023683",
"0.59847575",
"0.594807",
"0.5928933",
"0.5911791",
"0.5906582",
"0.5900664",
"0.58869207",
"0.5883265",
"0.58768916",
"0.58547425",
"0.5852083",
"0.58405983",
"0.58401465",
"0.581789",
"0.5811888",
"0.57951516",
"0.57834864"
]
| 0.8075002 | 0 |
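Note: the save_class_list document constructs a fresh csv.writer(f) for every row; creating the writer once inside a with-block is the more idiomatic shape and closes the file even if a write fails. A hedged, self-contained version (the sample row is made up) could look like this:

import csv

def save_rows(rows, path="mySchedule.csv"):
    # newline="" prevents the csv module from emitting blank lines on Windows.
    with open(path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["Day", "Class", "Start Time", "End Time"])
        writer.writerows(rows)

save_rows([["Monday", "CS 150", "09:00 AM", "10:15 AM"]])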
Method clearTerminal clears the Terminal for any OS | def clearTerminal():
os.system('cls' if os.name == 'nt' else 'clear') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_terminal(self):\n os.system('clear')",
"def resetTerminal():\n sys.stdout.write('\\n\\n') # add a few blank lines\n sys.stdout.flush()\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')",
"def clear():\n\n # windows \n if os.name == \"nt\": \n _ = os.system(\"cls\") \n # mac and linux\n else: \n _ = os.system(\"clear\")",
"def clear():\n if platform.system() == \"Windows\":\n os.system('cls')\n elif platform.system() == \"Linux\":\n os.system('clear')",
"def clear_screen():\n if name == \"nt\":\n system('cls')\n else:\n system('clear')",
"def clearConsole():\r\n\r\n command = 'clear' # command for console clearing\r\n if os.name in ('nt', 'dos'): # if the machine is running on Windows, then use cls\r\n command = 'cls'\r\n os.system(command) # othen than Windows, use clear\r",
"def command_clearterm():\n subprocess.call(\"reset\")",
"def clear_screen():\n\n # Clear command as function of OS\n command = \"cls\" if system_name().lower()==\"windows\" else \"clear\"\n\n # Action\n system_call(command)",
"def clear_screen():\n\n # Clear command as function of OS\n command = \"cls\" if system_name().lower()==\"windows\" else \"clear\"\n\n # Action\n system_call(command)",
"def clear_screen():\n\n # Clear command as function of OS\n command = \"cls\" if system_name().lower()==\"windows\" else \"clear\"\n\n # Action\n system_call(command)",
"def clear_console():\n os.system('cls' if os.name == 'nt' else \"clear\")",
"def clear_screen():\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')",
"def clear() -> None:\n\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear() -> None:\n\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear_screen():\n if os.name == 'nt':\n os.system(\"cls\")\n else:\n os.system(\"clear\")",
"def _clear_screen():\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')",
"def clear_console():\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear():\r\n if name == 'nt':\r\n _ = system('cls')\r\n else:\r\n _ = system('clear')",
"def clear_screen() -> None:\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")",
"def clear(): \n if os.name == \"nt\":\n os.system(\"cls\")\n else:\n os.system(\"clear\")",
"def clear():\n if os.name == 'nt': \n os.system('cls') \n else: \n os.system('clear')",
"def clear_console():\n import os\n clear = lambda: os.system('cls')\n clear()\n return None",
"def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear_screen():\n os.system(\"cls\" if os.name == 'nt' else 'clear')",
"def clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear():\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear():\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear():\r\n os.system('cls' if os.name == 'nt' else 'clear')"
]
| [
"0.8650224",
"0.8027377",
"0.7552109",
"0.75257945",
"0.74613637",
"0.745878",
"0.74554735",
"0.7451339",
"0.7451339",
"0.7451339",
"0.7439368",
"0.74361753",
"0.7435025",
"0.7435025",
"0.7429302",
"0.74284637",
"0.7426968",
"0.74216986",
"0.7405859",
"0.7401864",
"0.7398269",
"0.7385364",
"0.7373711",
"0.7373711",
"0.73054266",
"0.7301281",
"0.7301281",
"0.72967786",
"0.72967786",
"0.72943795"
]
| 0.8741407 | 0 |
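Note: clearTerminal shells out to cls/clear, as do most of the negatives above. As an alternative sketch (an assumption, not part of the original program), the same effect can usually be achieved without spawning a subprocess by emitting ANSI escape codes, which most modern terminals, including Windows 10+ consoles, honor:

import sys

def clear_terminal():
    # "\033[2J" clears the screen, "\033[H" moves the cursor to the top-left corner.
    sys.stdout.write("\033[2J\033[H")
    sys.stdout.flush()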
Method delay delays for 2 seconds by calling time.sleep() | def delay():
time.sleep(2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)",
"async def sleep(cls, delay: float) -> None:",
"def sleep(self):\n time.sleep(0.2)",
"def delay(ms: int, /) -> None:",
"def wait(delay=2):\n time.sleep(delay)",
"def _delay(self, n=None):",
"def sleep(seconds):\r\n time.sleep(seconds)",
"def sleep(seconds):\n time.sleep(seconds)",
"def sleep(seconds):\n time.sleep(seconds)",
"def sleep(self, amount: float):\n time.sleep(amount)",
"def __delay(msecs):\n time.sleep(msecs / 1000)",
"def sleep(sleep_time=0.250):\n time.sleep(sleep_time)",
"def _delay(self, delay=None):\n return self.screen.delay(delay)",
"def delay(self, length):\n self.log_info(f\"Browser.delay: Sleeping for {length} seconds\")\n return sleep(length)",
"def sleep(self):\n self.sleep_after(1) # Can't be 0, that means 'don't sleep'",
"def RandomDelay():\r\n sleep(random())",
"def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)",
"def sleep(self, seconds=60):\n\t\ttime.sleep(seconds)",
"def delay(interval):\n time.sleep(interval / 1000.0)",
"def sleep(seconds):\n\n return Sleep(seconds)",
"def deepsleep(time_ms: int = None) -> None:",
"def _sleep(self):\n while 1:\n diff = (time.time()-self.lastcall) - self.mindelay\n if diff >= 0: return\n time.sleep(max(-diff/2.0, 0.01))",
"def sleep(secs=1.0):\n time.sleep(secs)",
"def sleep(self, seconds):\n time.sleep(seconds)",
"def sleep(interval):\n time.sleep(interval) # pragma: no cover",
"def pause(seconds):\n time.sleep(seconds);",
"def delay(self, seconds):\n\n if self.call is None:\n return\n self.call.delay(seconds)",
"def test_delay():\n time1 = time.time()\n res = delay(1)(_dummy_func)(2)\n time2 = time.time()\n assert res == (2, 4)\n assert time2 - time1 >= 1",
"def randomized_sleep(duration):\n sleep(duration + duration * random.random())",
"def sleep(self):\n if self._stop is not None:\n timeLeft = max(self._stop - time.time(), 0) \n sleep = min(self._sleep, timeLeft)\n else:\n sleep = self._sleep\n time.sleep(sleep)"
]
| [
"0.8206127",
"0.80002713",
"0.7910068",
"0.7777698",
"0.7647302",
"0.763422",
"0.75461113",
"0.7504566",
"0.7504566",
"0.7499666",
"0.7484212",
"0.74510634",
"0.7408911",
"0.7404746",
"0.7371981",
"0.7316838",
"0.7283153",
"0.72631913",
"0.7243154",
"0.72041893",
"0.71867824",
"0.71609515",
"0.71344113",
"0.7116204",
"0.70809656",
"0.705022",
"0.699194",
"0.6922028",
"0.68628407",
"0.6851381"
]
| 0.8952803 | 0 |
Method mainLoop keeps the program going until the user decides to exit | def mainLoop():
clearTerminal()
load_class_list()
print("Class Scheduler Program by Cameron Zurmuhl")
print("Lafayette College, Class of 2020")
print("Email: [email protected]\n")
while True:
choice = mainMenuChoice()
if choice is None:
delay()
clearTerminal()
continue
if choice == 'q':
print("\nExiting...")
save_class_list()
delay()
clearTerminal()
break
elif choice == 'n':
addClass()
elif choice == 'd':
deleteClass()
clearTerminal()
elif choice == 's':
displayClassList(False)
if(len(classes)==0):
clearTerminal()
elif choice == 'e':
editClass()
clearTerminal()
elif choice == 'p':
print()
printSchedule()
elif choice == 'g':
print()
genScheduleCSV()
print("\nExiting...")
break
else:
print("\nInvalid choice.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main_loop(self):\n try:\n self.state_machine.set_state('wait')\n\n while True:\n events = list(reversed(pygame.event.get())) # Take all events, most recent first\n\n if self.find_quit_event(events):\n break\n\n if self.find_fullscreen_event(events):\n self.window.toggle_fullscreen()\n\n event = self.find_resize_event(events)\n if event:\n self.window.resize(event.size)\n\n self.state_machine.process(events)\n\n finally:\n self.led_picture.quit()\n self.led_print.quit()\n GPIO.cleanup()\n self.camera.quit()\n self.printer.quit()\n pygame.quit()",
"def main_loop(self):\n while self.game_manager.game_state != GameState.Quit:\n\n self.handle_events()\n self.handle_ui_response()\n #in menu\n if self.game_manager.game_state == GameState.Menu: \n self.display.clear()\n\n #in game\n elif self.game_manager.game_state == GameState.Running:\n self.game_manager.move_players()\n\n #after game\n elif self.game_manager.game_state == GameState.Finished:\n if self.game_manager.winner == None:\n self.game_manager.player1.decay()\n self.game_manager.player2.decay() \n else:\n self.game_manager.loser.decay()\n self.game_manager.loser.draw()\n\n #perform game manager actions\n self.game_manager.act()\n #do all the rendering stuff\n self.render_scene()\n #control FPS\n self.clock.tick(self.FPS)",
"def MainLoop(self):\n self.pleaseQuit=0\n\n self.logger.info(\"Starting main eventloop\")\n try:\n self.irc.process_forever(1)\n except KeyboardInterrupt:\n self.logger.warn(\"Received interrupt, disconnecting from irc\")\n #self.irc.disconnect_all(\"^C received\")\n self.irc.disconnect_all(\"even de suiker bijvullen\")\n \n self.logger.info(\"Finished disconnecting, shutting down\")",
"def main_loop(self):\r\n print('Press ctrl-c to quit')\r\n while True:\r\n url = input('\\nType Question url: ')\r\n handler = AnswerHandler(self.session)\r\n res, err = handler.answer_questions(url)\r\n if res:\r\n print('No more questions for this URL')\r\n else:\r\n print(f'Unexpected exception occurred: {err}', file=sys.stderr)\r\n traceback.print_exc()",
"def loop(self):\n while not self.should_exit:\n self._run_once()\n\n self.on_exit()",
"def main_loop(self):\n # main loop...don't ever exit\n while True:\n # collect data\n # get the time...the local clock is set with NTP regularly\n self._get_time()\n \n # get the latest metar data from the closest location\n self._get_metar()\n \n # get the latest fence station data\n self._get_fence_station()\n \n # get the lastest roof station data\n #METAR self._get_roof_station()\n \n # publish the data to our data file\n self.write_data_files()\n \n # show the user we are running\n print(\"{:s}\".format(datetime.datetime.now(pytz.UTC).strftime(\"%Y-%m-%d %H:%M:%S.%f\")), end=\"\\r\", flush=True)\n \n # wait a bit for the next loop\n time.sleep(3.0)\n \n return",
"def main(self):\n self.startup()\n if self.vehicle:\n try:\n while not self._loop_should_exit:\n self.tick()\n time.sleep(1)\n except KeyboardInterrupt:\n self.cleanup()\n self.cleanup()",
"def main_loop(self) -> None:\n # Modify signal handlers to make sure Ctrl-C is caught and handled.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n self._impl.main_loop()",
"def loop(self):\n\t\twhile (self.quit == 0):\n\t\t\ttry:\n\t\t\t\tuserInput = raw_input(\"> \")\n\t\t\t\tself.processCommand(userInput)\n\t\t\texcept EOFError:\n\t\t\t\tsys.exit()\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tsys.exit()",
"def MainLoop(self):\n while 1:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()",
"def main():\n\n while True:\n print(\"Let's explore some US bikeshare data!\")\n city, month, day = get_filters()\n df = load_data(city, month, day)\n # printing filter\n print(f\"Month: {month}, Day: {day}\")\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n display_records(df)\n restart = prompts.yes_no_prompt(\"\\nWould you like to restart?\\n\").launch()\n if not restart:\n break\n system(\"clear\")",
"def loop(self):\n pass",
"def main_loop(self):\n import time\n while not self.ask_for_stop:\n self.run_pending()\n time.sleep(self.delay)\n # FIXME this will look at self.ask_for_stop only every self.delay seconds\n # see https://stackoverflow.com/questions/5114292/break-interrupt-a-time-sleep-in-python",
"def main_loop(self):\n dt = 0\n self.clock.tick(FPS)\n while not self.done:\n self.event_loop()\n self.update(dt)\n self.render()\n dt = self.clock.tick(FPS) / 1000.0",
"def main(self,Surf):\n while True:\n if self.state == \"GAME\":\n self.event_loop()\n self.update(Surf)\n elif self.state == \"QUIT\":\n break\n pg.display.update()\n self.Clock.tick(65)",
"def main_loop(self) -> None:\n while True:\n player = self._players[self._current_player]\n hit = True\n while hit:\n self.select_square(player)\n if self.menu_called: # go to menu\n self.menu_called = False\n return\n hit = player.shoot()\n if player.has_won():\n self.display_manager.display_end_game_message(player)\n self.game_over = True\n return\n self._current_player = (self._current_player + 1) % len(self._players)",
"async def _main(self):\n while True:\n time.sleep(1)",
"def main():\n while True:\n click.clear()\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n while True:\n select_data = choice('\\nPlease select the information you would'\n 'like to obtain:\\n'\n '\\n'\n '[ts] Time Stats\\n'\n '[ss] Station Stats\\n'\n '[tds] Trip Duration Stats \\n'\n '[us] User Stats\\n'\n '[rd] Raw Data\\n'\n '\\n'\n '[0] Exit\\n>',\n ('ts', 'ss', 'tds', 'us', 'rd', 'r'))\n click.clear()\n if select_data == 'ts':\n time_stats(df)\n elif select_data == 'ss':\n station_stats(df)\n elif select_data == 'tds':\n trip_duration_stats(df)\n elif select_data == 'us':\n user_stats(df)\n elif select_data == 'rd':\n display_data(df)\n elif select_data == '0':\n break\n\n restart = choice('\\nWould you like to restart?'\n 'Enter yes or no.\\n').lower()\n print()\n if restart.lower() != 'y':\n break",
"def main():\n is_program_working = True\n while is_program_working:\n display.print_program_menu(MAIN_MENU)\n try:\n choose_option()\n except ValueError as err:\n display.print_command_result(str(err))",
"def main():\r\n gameclass = data.game.GameClass()\r\n gameclass.main_loop()",
"def main_loop(self):\n dt = 0.3\n self.clock.tick(self.fps)\n while not self.done:\n self.event_loop()\n self.update(dt)\n self.render()\n dt = self.clock.tick(self.fps)/1000.0 # create delta time variable to multiply with movement and rotation\n self.display_fps()\n self.health_bar()\n self.enemy_health()\n self.energy_bar()",
"def main_loop(self):\n LOGGER.info('Entering main event loop...')\n try:\n while self._handle_faucet_events():\n while not self._faucet_events.event_socket_connected:\n LOGGER.info('Attempting faucet event sock connection...')\n time.sleep(1)\n try:\n self._faucet_events.connect()\n self._restore_states()\n self._faucet_collector.set_state_restored(True)\n except Exception as e:\n LOGGER.error(\"Cannot restore states or connect to faucet: %s\", e)\n self._faucet_collector.set_state_restored(False, e)\n except KeyboardInterrupt:\n LOGGER.info('Keyboard interrupt. Exiting.')\n self._faucet_events.disconnect()\n except Exception as e:\n LOGGER.error(\"Exception: %s\", e)\n raise",
"def exit_loop(self):\n self.loop.exit()",
"def main_loop(self):\n center_point = self.mot.center_point\n\n screen_width = center_point[0] * 2\n screen_height = center_point[1] * 2\n\n time.sleep(1)\n pretty_progress_bar(\n 3,\n )\n\n # while int(time.time()) - start <= 10:\n while not self.mot.abort:\n object1_position = self.mot.position(self.color1)[0]\n object2_velocity = self.mot.speed(self.color2)\n # print(object2_velocity)\n\n self.compare(object1_position[0] < 0.25 * screen_width, 'left')\n self.compare(object1_position[0] > 0.75 * screen_width, 'right')\n self.compare(object1_position[1] < 0.25 * screen_height, 'jump')\n self.burst_compare(object2_velocity > 150, 'fire')\n\n # print('KEYBOARD ABORT')",
"def run(self):\n\n while not self.done:\n\n self.event_loop()\n\n self.update()",
"def _run(self):\n while(self._loop):\n pass",
"def postloop(self):\n print 'Bye!'",
"def takeControl(self):\n mainloop()",
"def takeControl(self):\n mainloop()",
"def loop():\n try:\n logger.info('Script loop started')\n while (Script.running):\n time.sleep(0.01)\n logger.warning('Script finished')\n except (KeyboardInterrupt, SystemExit):\n logger.warning('Script cancelled from keyboard')\n finally:\n action_exit()"
]
| [
"0.7655049",
"0.7512549",
"0.7502741",
"0.7455534",
"0.74237436",
"0.7375826",
"0.727332",
"0.7264046",
"0.7217026",
"0.7194721",
"0.7161396",
"0.70686316",
"0.70659685",
"0.7030019",
"0.70148635",
"0.7013023",
"0.69647336",
"0.6955067",
"0.6947823",
"0.6946313",
"0.69018674",
"0.689185",
"0.68548065",
"0.6836358",
"0.68148625",
"0.68124133",
"0.6802838",
"0.6792134",
"0.6792134",
"0.677752"
]
| 0.793783 | 0 |
Create OboOptionalAttrs or return None. | def _init_optional_attrs(optional_attrs):
if optional_attrs is None:
return None
opts = OboOptionalAttrs.get_optional_attrs(optional_attrs)
if opts:
return OboOptionalAttrs(opts) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, obo_file=OBO_FILE, optional_attrs=None):\n self.optobj = self._init_optional_attrs(optional_attrs) # OboOptionalAttrs or None\n self.format_version = None # e.g., \"1.2\" of \"format-version:\" line\n self.data_version = None # e.g., \"releases/2016-07-07\" from \"data-version:\" line\n self.typedefs = {}\n\n # True if obo file exists or if a link to an obo file exists.\n print(\"obo_file:\")\n print(obo_file)\n if os.path.isfile(obo_file):\n self.obo_file = obo_file\n # GOTerm attributes that are necessary for any operations:\n else:\n raise Exception(\"COULD NOT READ({OBO})\\n\"\n \"download obo file first\\n \"\n \"[http://geneontology.org/ontology/\"\n \"go-basic.obo]\".format(OBO=obo_file))",
"def make_optional(\n name: str,\n elem_type: OptionalProto.DataType,\n value: Optional[Any],\n) -> OptionalProto:\n optional = OptionalProto()\n optional.name = name\n optional.elem_type = elem_type\n\n if elem_type == OptionalProto.UNDEFINED:\n return optional\n if elem_type == OptionalProto.TENSOR:\n attribute = optional.tensor_value\n elif elem_type == OptionalProto.SPARSE_TENSOR:\n attribute = optional.sparse_tensor_value # type: ignore[assignment]\n elif elem_type == OptionalProto.SEQUENCE:\n attribute = optional.sequence_value # type: ignore[assignment]\n elif elem_type == OptionalProto.MAP:\n attribute = optional.map_value # type: ignore[assignment]\n elif elem_type == OptionalProto.OPTIONAL:\n attribute = optional.optional_value # type: ignore[assignment]\n else:\n raise TypeError(\"The element type in the input optional is not supported.\")\n\n attribute.CopyFrom(value) # type: ignore[arg-type]\n return optional",
"def setAllOptional(self):\n for att in self._attributes.values():\n att.outputOptional = True",
"def hasOptionalAttributes(self):\n return _libsbml.SBase_hasOptionalAttributes(self)",
"def _construct_optional(self, params):\r\n\r\n args = []\r\n filtered = {key: arg.default for key, arg in params.items() if arg.default != inspect._empty}\r\n for key, default in filtered.items():\r\n arg = self.OptionalArg(full=key, abbrev=key[0].lower(), default=default)\r\n args.append(arg)\r\n\r\n args_full, args_abbrev = dict(), dict()\r\n\r\n # Resolve conflicts\r\n known_count = defaultdict(int)\r\n for arg in args:\r\n args_full[arg.full] = arg\r\n\r\n if known_count[arg.abbrev] == 0:\r\n args_abbrev[arg.abbrev] = arg\r\n elif known_count[arg.abbrev] == 1:\r\n new_abbrev = arg.abbrev.upper()\r\n args_full[arg.full] = self.OptionalArg(full=arg.full, abbrev=new_abbrev, default=arg.default)\r\n args_abbrev[new_abbrev] = args_full[arg.full]\r\n else:\r\n new_abbrev = arg.abbrev.upper() + str(known_count[arg.abbrev])\r\n args_full[arg.full] = self.OptionalArg(full=arg.full, abbrev=new_abbrev, default=arg.default)\r\n args_abbrev[new_abbrev] = args_full[arg.full]\r\n known_count[arg.abbrev] += 1\r\n return args_full, args_abbrev",
"def optional():",
"def _init_attributes(self):\n self.attr = {\n 'name': None,\n 'tags': [],\n 'openHours': None,\n 'type': None,\n 'parent': None,\n 'locationId': None,\n 'bannerAbbreviation': None,\n 'arcGisAbbreviation': None,\n 'geoLocation': None,\n 'geometry': None,\n 'summary': None,\n 'description': None,\n 'descriptionHtml': None,\n 'address': None,\n 'city': None,\n 'state': None,\n 'zip': None,\n 'county': None,\n 'telephone': None,\n 'fax': None,\n 'thumbnails': [],\n 'images': [],\n 'departments': [],\n 'website': None,\n 'sqft': None,\n 'calendar': None,\n 'campus': None,\n 'girCount': None,\n 'girLimit': False,\n 'girLocations': None,\n 'synonyms': [],\n 'bldgId': None,\n 'parkingZoneGroup': None,\n 'propId': None,\n 'adaParkingSpaceCount': None,\n 'motorcycleParkingSpaceCount': None,\n 'evParkingSpaceCount': None,\n 'weeklyMenu': None,\n 'notes': None,\n 'labels': {},\n 'steward': None,\n 'shape': {}\n }",
"def _none(self, attrs):\n\n [self.__setattr__(nm, None) if nm[0] == '_' else\n self._data.pop(nm, None) for nm in attrs]",
"def clean_optional(self):\n for o in self.optional():\n if hasattr(self, o) and getattr(self, o) is no_default:\n delattr(self, o)",
"def is_optional(self):\n raise exceptions.NotImplementedError()",
"def createOr(self):\n return _libsbml.FbcAnd_createOr(self)",
"def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):\n attrs = dict(base_attrs, **kwargs)\n if extra_attrs:\n attrs.update(extra_attrs)\n return attrs",
"def init_attrs(self):\n raise NotImplementedError",
"def __init__(self, attrs = None):\n\n if attrs != None:\n self.__dict__.update(attrs)",
"def get_non_null_attributes(self) -> dict:\n return {\n key: getattr(self, key, None)\n for key in sorted(self.attributes)\n if getattr(self, key, None) is not None\n }",
"def get_joint_attrib_template():\n return {\n \"type\": \"free\",\n }",
"def get_bare_metal_create_options(self):\r\n hw_id = self.get_bare_metal_package_id()\r\n\r\n if not hw_id:\r\n return None\r\n\r\n return self._parse_package_data(hw_id)",
"def _construct_standardized_metadata(self):\n return None",
"def __getattribute__(self, name: str) -> Any:\n if name == \"_unready_attributes\":\n pass\n elif hasattr(self, \"_unready_attributes\") and name in self._unready_attributes:\n raise OWSConfigNotReady(f\"The following parameters have not been initialised: {self._unready_attributes}\")\n return object.__getattribute__(self, name)",
"def is_optional(cls) -> bool:\n return cls.optional",
"def test_optional_group_empty(self):\n\n self.mapper.map_spec('attr3', self.mapper.spec.get_group('empty').get_attribute('attr3'))\n\n bar_inst1 = Bar('my_bar1', list(range(10)), 'value1', 10)\n bar_inst1._Bar__attr3 = None # force attr3 to be None\n builder = self.mapper.build(bar_inst1, self.manager)\n\n expected = GroupBuilder(\n name='my_bar1',\n )\n self.assertBuilderEqual(expected, builder)",
"def __optional_is_not_given(self):\n\n strTestName = 'Optional parameter is not given (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddOpt('optional_parameter', 'Optional parameter')\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_",
"def createOr(self):\n return _libsbml.FbcOr_createOr(self)",
"def __init__(self):\n self.clean_optional()",
"def test_optional(self):\n x = t.Optional(t.Exactly(\"x\"))\n self.assertEqual(writePython(x),\n dd(\"\"\"\n def _G_optional_1():\n _G_exactly_2, lastError = self.exactly('x')\n self.considerError(lastError, None)\n return (_G_exactly_2, self.currentError)\n def _G_optional_3():\n return (None, self.input.nullError())\n _G_or_4, lastError = self._or([_G_optional_1, _G_optional_3])\n self.considerError(lastError, None)\n _G_or_4\n \"\"\"))",
"def __init__(self, email: str=None, is_bot: bool=None, avatar_url: str=None, avatar_version: int=None, full_name: str=None, is_admin: bool=None, is_owner: bool=None, is_billing_admin: bool=None, role: int=None, bot_type: int=None, user_id: int=None, bot_owner_id: int=None, is_active: bool=None, is_guest: bool=None, timezone: str=None, date_joined: str=None, delivery_email: str=None, profile_data: Dict[str, object]=None):\n self.openapi_types = {\n 'email': str,\n 'is_bot': bool,\n 'avatar_url': str,\n 'avatar_version': int,\n 'full_name': str,\n 'is_admin': bool,\n 'is_owner': bool,\n 'is_billing_admin': bool,\n 'role': int,\n 'bot_type': int,\n 'user_id': int,\n 'bot_owner_id': int,\n 'is_active': bool,\n 'is_guest': bool,\n 'timezone': str,\n 'date_joined': str,\n 'delivery_email': str,\n 'profile_data': Dict[str, object]\n }\n\n self.attribute_map = {\n 'email': 'email',\n 'is_bot': 'is_bot',\n 'avatar_url': 'avatar_url',\n 'avatar_version': 'avatar_version',\n 'full_name': 'full_name',\n 'is_admin': 'is_admin',\n 'is_owner': 'is_owner',\n 'is_billing_admin': 'is_billing_admin',\n 'role': 'role',\n 'bot_type': 'bot_type',\n 'user_id': 'user_id',\n 'bot_owner_id': 'bot_owner_id',\n 'is_active': 'is_active',\n 'is_guest': 'is_guest',\n 'timezone': 'timezone',\n 'date_joined': 'date_joined',\n 'delivery_email': 'delivery_email',\n 'profile_data': 'profile_data'\n }\n\n self._email = email\n self._is_bot = is_bot\n self._avatar_url = avatar_url\n self._avatar_version = avatar_version\n self._full_name = full_name\n self._is_admin = is_admin\n self._is_owner = is_owner\n self._is_billing_admin = is_billing_admin\n self._role = role\n self._bot_type = bot_type\n self._user_id = user_id\n self._bot_owner_id = bot_owner_id\n self._is_active = is_active\n self._is_guest = is_guest\n self._timezone = timezone\n self._date_joined = date_joined\n self._delivery_email = delivery_email\n self._profile_data = profile_data",
"def get_optional(cls, wrapped: FakeAnnotation) -> FakeAnnotation:\n if (\n isinstance(wrapped, TypeSubscript)\n and isinstance(wrapped.parent, TypeAnnotation)\n and wrapped.parent.is_union()\n ):\n result = wrapped.copy()\n result.add_child(cls.none)\n return result\n return TypeSubscript(cls.Optional, [wrapped])",
"def _validate_usage_of_optional(self) -> None:\n # Because None can be the default value, None cannot be used to to indicate no default. This is why we need the optional field. This check prevents users of InputSpec from setting these two values to an inconsistent state, forcing users of InputSpec to be explicit about optionality.\n if self.optional is False and self.default is not None:\n raise ValueError(\n f'`optional` argument to {self.__class__.__name__} must be True if `default` is not None.'\n )",
"def _merge_attributes(self, workout):\n keys = self.__table__.columns.keys()\n for key in keys:\n if key in [\"id\",\n \"external_id\",\n \"is_duplicate_with\",\n \"manual_check_required_with\",\n ]:\n continue\n elif getattr(self, key) == None:\n # copy attribute if empty; else keep existing \n setattr(self, key, getattr(workout, key))"
]
| [
"0.6076875",
"0.5928884",
"0.59069616",
"0.59017825",
"0.5634378",
"0.5530078",
"0.5340581",
"0.52882504",
"0.52593064",
"0.52090013",
"0.51132935",
"0.5093037",
"0.50776744",
"0.50472337",
"0.50441504",
"0.5007685",
"0.500275",
"0.49961215",
"0.49829775",
"0.49819085",
"0.49770907",
"0.49651155",
"0.49632353",
"0.49538335",
"0.49354774",
"0.49156567",
"0.48884824",
"0.48856816",
"0.48778963",
"0.48689288"
]
| 0.8182207 | 0 |
Gets parent GO IDs up to k levels back | def get_k_parents(self, k = None):
if k is None:
return self.get_all_parents()
else:
k_parents = set()
cur_parents = self.parents
cur_parents_ids = [cur_parent.id for cur_parent in cur_parents]
for i in range(0, k):
k_parents.update(cur_parents_ids)
new_cur_parents = set()
for cur_parent in cur_parents:
new_cur_parents.update(cur_parent.parents)
cur_parents = new_cur_parents
cur_parents_ids = [cur_parent.id for cur_parent in cur_parents]
return k_parents | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_parents(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_parents()\n return set_parents",
"def _get_parents(self):\n parents = []\n parent = self.parent\n while(parent):\n parents.append(parent)\n parent = parent.parent\n parents.reverse()\n return parents",
"def parent_ids(self):\n return self._parent_ids",
"def getParents(obj):",
"def get_parents(self):\n return self.parents",
"def getParents(self):\n return self.parents[:]",
"def parent(self):\n other = self\n while True:\n for rev in other._hgmo['parents']:\n parent = Push(rev)\n if parent.id != self.id:\n return parent\n other = parent",
"def get_parent_index(self):\n return (self.index - 1) // 2",
"def find_parents(self):\r\n for i in range(len(self.vertices)):\r\n self.vertices[i].parents = []\r\n for i in range(len(self.vertices)):\r\n for child in self.vertices[i].children:\r\n if i not in self.vertices[child].parents:\r\n self.vertices[child].parents.append(i)",
"def parents(self):\n p = self\n result = []\n while p:\n result.append(p)\n p = p.parent\n return result",
"def get_parents_list(self):\n return []",
"def layer_parent_nid(self, layer_id):\n ...",
"def get_parents(self):\r\n\r\n raise NotImplementedError()",
"def get_parents(self, node):\n pass",
"def get_reference_node_parents(ref):\n parents = []\n return parents",
"def get_children(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_children()\n return set_parents",
"def _get_parents(stable_id):\n react_data_url = 'http://www.reactome.org/ContentService/data/entity/' + \\\n stable_id + '/componentOf'\n headers = {'Accept': 'application/json'}\n res = requests.get(react_data_url, headers=headers)\n if not res.status_code == 200:\n return []\n json = res.json()\n names = []\n stable_ids = []\n schema_classes = []\n for parent_group in json:\n if not parent_group.get('type') in \\\n ['hasComponent', 'hasMember', 'hasCandidate']:\n continue\n names += parent_group.get('names')\n stable_ids += parent_group.get('stIds')\n schema_classes += parent_group.get('schemaClasses')\n parents_at_this_level = list(zip(names, stable_ids, schema_classes))\n parents_at_next_level_up = []\n for p_name, p_id, sc in parents_at_this_level:\n parents_at_next_level_up += _get_parents(p_id)\n return parents_at_this_level + parents_at_next_level_up",
"def GetParents(self, p_int, p_int_1, p_int_2):\n ...",
"def find_parent_terms(go_id, go_dict):\n\n go_set = set()\n values = go_dict[go_id]\n\n for value in values:\n go_set.add(value)\n more_values = find_parent_terms(value, go_dict)\n for more_value in more_values:\n go_set.add(more_value)\n\n return go_set",
"def getAncestors():",
"def get_parents(graph, node):\n return list(graph.pred[node].keys())",
"def get_parents(all_parents, current):\r\n for parent in all_parents.get(current, []):\r\n yield parent\r\n for grandparent in get_parents(all_parents, parent):\r\n yield grandparent",
"def get_parent(self, index):\n return (index - 1) // (2)",
"def get_chebi_parents(chebi_ent):\n if hasattr(chebi_ent, 'OntologyParents'):\n return [ent.chebiId for ent in chebi_ent.OntologyParents if\n (ent.type == 'is a')]\n else:\n return []",
"def ancestors(self):\r\n ret = []\r\n workunit = self\r\n while workunit is not None:\r\n ret.append(workunit)\r\n workunit = workunit.parent\r\n return ret",
"def get_parents(self):\n return NodeList(self._my_map['parentNodes'])",
"def _parent_chain(self):\n chain = [self]\n while True:\n try:\n parent = chain[-1].parent\n except Exception:\n break\n if parent is None:\n break\n chain.append(parent)\n return chain",
"def parents(self, path):\n pass",
"def lasts(self):\r\n return self._parents_of[None]",
"def _parent(self, j):\n return (j - 1) // 2"
]
| [
"0.6740107",
"0.6590704",
"0.65110725",
"0.64462346",
"0.6219877",
"0.6115461",
"0.6057378",
"0.60571146",
"0.60565615",
"0.6040938",
"0.6035474",
"0.60345376",
"0.6034505",
"0.5995139",
"0.59800035",
"0.5976643",
"0.5934511",
"0.5923506",
"0.5918514",
"0.58731055",
"0.58563775",
"0.585531",
"0.5838067",
"0.58347476",
"0.5821269",
"0.5810332",
"0.5796084",
"0.57921696",
"0.57836354",
"0.5769676"
]
| 0.7250033 | 0 |
Returns a set containing parents and relationship GO Terms. | def get_goterms_upper(self):
# Requires GODag is created with 'relationship' in optional_attrs argument
# pylint: disable=no-member
return set.union(self.parents, *self.relationship.values()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_goterms_lower(self):\n # Requires GODag is created with 'relationship' in optional_attrs argument\n # pylint: disable=no-member\n return set.union(self.children, *self.relationship_rev.values())",
"def relations(self):\n return set(self.triples()[\"relation\"])",
"def find_parent_terms(go_id, go_dict):\n\n go_set = set()\n values = go_dict[go_id]\n\n for value in values:\n go_set.add(value)\n more_values = find_parent_terms(value, go_dict)\n for more_value in more_values:\n go_set.add(more_value)\n\n return go_set",
"def _get_parents(self):\n if not self.ontology:\n raise ValueError(\"No associated ontology.\")\n\n return self.ontology.get_super_classes(self)",
"def _go_terms_to_list(self, go_cat=None, set_go_terms=None, relation=\"parent\"):\n def rep(term, cat_str):\n return term.replace(\"level-\", \"\").replace(\"depth-\", \"\").replace(\" [{}]\".format(cat_str), \"\")\n if relation not in [\"parent\", \"child\"]:\n raise ValueError(\"'relation' must be parent or child\")\n list_relations = [[rep(x, self.dict_go_ns[go_cat]) for x in str(self.dict_go[term]).split(\"\\t\")] +\n [relation] for term in set_go_terms]\n return list_relations",
"def parents(self) -> set[\"HierarchicalCategory\"]:\n return self.categorization.parents(self)",
"def get_all_parents(self):\n all_parents = set()\n for parent in self.parents:\n all_parents.add(parent.id)\n all_parents |= parent.get_all_parents()\n return all_parents",
"def get_parents_list(self):\n return []",
"def common_parent(terms, go):\n # Find candidates from first\n rec = go[terms[0]]\n candidates = rec.get_all_parents()\n candidates.update({terms[0]})\n\n # Find intersection with second to nth term\n for term in terms[1:]:\n rec = go[term]\n parents = rec.get_all_parents()\n parents.update({term})\n # Find the intersection with the candidates, and update.\n candidates.intersection_update(parents)\n return candidates",
"def _get_parents(self):\n if not self.ontology:\n raise ValueError(\"No associated ontology.\")\n\n return self.ontology.get_super_properties(self)",
"def getAncestors():",
"def get_relations(self):\n triples = list(self.get_triples())\n\n for s, p, o in triples:\n if not p.startswith(\"rel\"):\n s, o = int(s.id), int(o.id)\n yield {\"predicate\": p,\n \"subject\": s,\n \"subject_nodes\": list(self.get_descendants(s, triples)),\n \"object\": o,\n \"object_nodes\": list(self.get_descendants(o, triples)),\n }",
"def related(self):\n return [ch for ch in self.sentence.chunks \n if ch != self and intersects(unzip(0, ch.relations), unzip(0, self.relations))]",
"def fm_all_parents(self):\n return self._relation_lst[self.PARENT].copy()",
"def cousins(word, level):\r\n parents = [get_synset(word)]\r\n for _ in range(level):\r\n parent_hypernyms = [parent.hypernyms() for parent in parents]\r\n parents = list(chain(*parent_hypernyms))\r\n\r\n children = parents\r\n for _ in range(level):\r\n children_hyponyms = [child.hyponyms() for child in children]\r\n children = list(chain(*children_hyponyms))\r\n\r\n children = set(children)\r\n children -= {get_synset(word)}\r\n return set(map(synset_name, children))",
"def parents(self, taxonomy):\n\n p = defaultdict(list)\n for taxon_id, taxa in taxonomy.items():\n p[taxon_id] = taxa\n for i, taxon in enumerate(taxa):\n if i != 0:\n p[taxon] = taxa[0:i]\n\n return p",
"def _populate_terms(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n # Make parents and relationships references to the actual GO terms.\n for rec in self.values():\n # Given parent GO IDs, set parent GO Term objects\n rec.parents = set([self[goid] for goid in rec._parents])\n\n # For each parent GO Term object, add it's child GO Term to the children data member\n for parent_rec in rec.parents:\n parent_rec.children.add(rec)\n\n if has_relationship:\n self._populate_relationships(rec)",
"def get_parents(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_parents()\n return set_parents",
"def ancestors(self):\n for a in self._related(set(), 'parents'):\n yield a",
"def external_terminologies(self):\n terms = set()\n for node_record in self.graph.run(\"MATCH (n) RETURN (n)\"):\n node = node_record[\"n\"]\n if \"links_to\" in node:\n terms.add(node[\"links_to\"])\n return terms",
"def getParents(self):\n return self.parents[:]",
"def get_parent_depends(self):\n union_parent_depends = set()\n for name, parent in self.parents.items():\n union_parent_depends.update(set(parent.depends))\n return union_parent_depends",
"def parents(self):\n return tuple(self._parents)",
"def get_parents(self):\r\n\r\n raise NotImplementedError()",
"def gather_entities(self):\n entitylist = set()\n for entity in self.entities.all():\n entitylist.add(entity)\n entitylist.update(entity.get_ancestors())\n return entitylist #set(entity for entity in entitylist if not entity.abstract_entity)",
"def entities(self):\n triples = self.triples()\n return set(pd.concat((triples[\"head\"], triples[\"tail\"])))",
"def _get_parents(self):\n parents = []\n parent = self.parent\n while(parent):\n parents.append(parent)\n parent = parent.parent\n parents.reverse()\n return parents",
"def _get_children(self):\n return set()",
"def children(self, taxon, taxonomy):\n\n c = set()\n for taxon_id, taxa in taxonomy.items():\n if taxon in taxa:\n\n if taxon.startswith('s__'):\n c.add(taxon_id)\n else:\n taxon_index = taxa.index(taxon)\n for child in taxa[taxon_index + 1:]:\n if len(child) > 3: # not just an empty prefix\n c.add(child)\n\n return c",
"def parents_of(self, kw):\n nn = self.getNode(kw)\n return [e.n1.kw for e in nn.edgein]"
]
| [
"0.7205081",
"0.69444543",
"0.691439",
"0.65272766",
"0.6383208",
"0.6374926",
"0.63622785",
"0.63477033",
"0.63416654",
"0.6335229",
"0.63303053",
"0.6289184",
"0.6276815",
"0.62548405",
"0.624102",
"0.6234652",
"0.61212283",
"0.6087649",
"0.6087368",
"0.60715574",
"0.6070123",
"0.6066056",
"0.60401464",
"0.6028093",
"0.5990863",
"0.598867",
"0.5981695",
"0.5980559",
"0.5975817",
"0.59699166"
]
| 0.726774 | 0 |
Returns a set containing children and reverse-relationship GO Terms. | def get_goterms_lower(self):
# Requires GODag is created with 'relationship' in optional_attrs argument
# pylint: disable=no-member
return set.union(self.children, *self.relationship_rev.values()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_goterms_upper(self):\n # Requires GODag is created with 'relationship' in optional_attrs argument\n # pylint: disable=no-member\n return set.union(self.parents, *self.relationship.values())",
"def _get_children(self):\n return set()",
"def children(self, taxon, taxonomy):\n\n c = set()\n for taxon_id, taxa in taxonomy.items():\n if taxon in taxa:\n\n if taxon.startswith('s__'):\n c.add(taxon_id)\n else:\n taxon_index = taxa.index(taxon)\n for child in taxa[taxon_index + 1:]:\n if len(child) > 3: # not just an empty prefix\n c.add(child)\n\n return c",
"def children(g, parent):\n res = set()\n\n if isinstance(parent, Production):\n prods = [parent]\n else:\n prods = g.productions(parent)\n\n for prod in prods:\n for item in prod.rhs():\n if is_nonterminal(item):\n res.add(item)\n\n return res",
"def relations(self):\n return set(self.triples()[\"relation\"])",
"def get_nodes(self):\n return_set = set()\n for value in self._name:\n return_set.add(value)\n return return_set",
"def nodes(self):\n return set(self.values())",
"def get_all_children(self):\n all_children = set()\n for parent in self.children:\n all_children.add(parent.id)\n all_children |= parent.get_all_children()\n return all_children",
"def descendants(self):\n for a in self._related(set(), 'children'):\n yield a",
"def get_all_terms(self):\n return self.term.all()",
"def setOfBetas(self, free=True, fixed=False):\n s = set()\n for e in self.children:\n s = s.union(e.setOfBetas(free, fixed))\n return s",
"def children(self) -> Iterable[Heirarchical]:\n return []",
"def terms(self):\n return self._terms",
"def get_all_descriptor_terms(self):\n\t\tall_terms = set()\n\t\tfor ranking in self.get_descriptors(self.top_terms):\n\t\t\tall_terms = set(ranking).union(all_terms)\n\t\treturn sorted(all_terms)",
"def find_parent_terms(go_id, go_dict):\n\n go_set = set()\n values = go_dict[go_id]\n\n for value in values:\n go_set.add(value)\n more_values = find_parent_terms(value, go_dict)\n for more_value in more_values:\n go_set.add(more_value)\n\n return go_set",
"def children(self):\n if self._children is None:\n return set()\n else:\n return self._children",
"def leaves(self):\n if self.keys():\n return set().union(*[child.leaves() for child in self.itervalues()])\n else:\n return set(self.name)",
"def get_nodes(self):\n return_set = set()\n for key in self._main_dictionary:\n return_set.add(key)\n return return_set",
"def taxon_children(self, taxonomy):\n\n taxon_children = defaultdict(set)\n for taxon_id, taxa in taxonomy.items():\n for i, taxon in enumerate(taxa):\n if len(taxon) == 3:\n continue # just rank prefix\n\n if len(taxa) > i + 1 and len(taxa[i + 1]) != 3:\n taxon_children[taxon].add(taxa[i + 1])\n\n if len(taxa) > self.rank_index['s__']:\n taxon = taxa[self.rank_index['s__']]\n if taxon != 's__':\n taxon_children[taxon].add(taxon_id)\n\n return taxon_children",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def _get_terms(self):\n return self.__terms",
"def descendant_type_set(self) -> Set[str]:\n return set(\n chain.from_iterable(\n seg.descendant_type_set | seg.class_types for seg in self.segments\n )\n )",
"def descendants(self) -> set[\"HierarchicalCategory\"]:\n return self.categorization.descendants(self)"
]
| [
"0.7018476",
"0.7015497",
"0.6693456",
"0.6552289",
"0.6530311",
"0.6424423",
"0.6325786",
"0.6248133",
"0.62194085",
"0.61937857",
"0.6164341",
"0.61638975",
"0.61285734",
"0.6123821",
"0.6115087",
"0.6100571",
"0.60878086",
"0.60534227",
"0.60302037",
"0.60205144",
"0.60205144",
"0.60205144",
"0.60205144",
"0.60205144",
"0.60205144",
"0.60205144",
"0.60205144",
"0.60205144",
"0.60075957",
"0.5998901"
]
| 0.7491547 | 0 |
Write hierarchy for a GO Term record. | def write_hier_rec(self, gos_printed, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False,
include_only=None, go_marks=None,
depth=1, depth_dashes="-"):
# Added by DV Klopfenstein
goid = self.id
# Shortens hierarchy report by only printing the hierarchy
# for the sub-set of user-specified GO terms which are connected.
if include_only is not None and goid not in include_only:
return
nrp = short_prt and goid in gos_printed
if go_marks is not None:
out.write('{} '.format('>' if goid in go_marks else ' '))
if len_dash is not None:
# Default character indicating hierarchy level is '-'.
# '=' is used to indicate a hierarchical path printed in detail previously.
letter = '-' if not nrp or not self.children else '='
depth_dashes = ''.join([letter]*depth)
out.write('{DASHES:{N}} '.format(DASHES=depth_dashes, N=len_dash))
if num_child is not None:
out.write('{N:>5} '.format(N=len(self.get_all_children())))
out.write('{GO}\tL-{L:>02}\tD-{D:>02}\t{desc}\n'.format(
GO=self.id, L=self.level, D=self.depth, desc=self.name))
# Track GOs previously printed only if needed
if short_prt:
gos_printed.add(goid)
# Do not print hierarchy below this turn if it has already been printed
if nrp:
return
depth += 1
if max_depth is not None and depth > max_depth:
return
for child in self.children:
child.write_hier_rec(gos_printed, out, len_dash, max_depth, num_child, short_prt,
include_only, go_marks,
depth, depth_dashes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(obj, handle, **kwargs):\n trees = list(obj)\n writer = NewickIO.Writer(trees)\n nexus_trees = [\n TREE_TEMPLATE % {\"index\": idx + 1, \"tree\": nwk}\n for idx, nwk in enumerate(\n writer.to_strings(plain=False, plain_newick=True, **kwargs)\n )\n ]\n tax_labels = [str(x.name) for x in chain(*(t.get_terminals() for t in trees))]\n text = NEX_TEMPLATE % {\n \"count\": len(tax_labels),\n \"labels\": \" \".join(tax_labels),\n \"trees\": \"\\n\".join(nexus_trees),\n }\n handle.write(text)\n return len(nexus_trees)",
"def write_hier(self, go_id, out=sys.stdout,\n len_dash=1, max_depth=None, num_child=None, short_prt=False,\n include_only=None, go_marks=None):\n gos_printed = set()\n self[go_id].write_hier_rec(gos_printed, out, len_dash, max_depth, num_child,\n short_prt, include_only, go_marks)",
"def create_hierarchy(self):\n\t\tpass",
"def write_joint_hierarchy(bvh_tree, filepath, scale=1.0):\n data = list()\n for joint in bvh_tree.get_joints(end_sites=True):\n joint_name = joint.name\n parent_name = bvh_tree.joint_parent(joint_name).name if bvh_tree.joint_parent(joint_name) else ''\n row = [joint_name, parent_name]\n row.extend((scale * offset for offset in bvh_tree.joint_offset(joint.name)))\n data.append(tuple(row))\n data = np.array(data, dtype=[('joint', np.unicode_, 20),\n ('parent', np.unicode_, 20),\n ('offset.x', np.float),\n ('offset.y', np.float),\n ('offset.z', np.float)])\n try:\n np.savetxt(filepath,\n data,\n header=','.join(data.dtype.names),\n fmt=['%s', '%s', '%10.5f', '%10.5f', '%10.5f'],\n delimiter=',',\n comments='')\n return True\n except IOError as e:\n print(\"ERROR({}): Could not write to file {}.\\n\"\n \"Make sure you have writing permissions.\\n\".format(e.errno, filepath))\n return False",
"def write_node(self, record) -> None:\n pass",
"def writeTaxonomies( self ):\n\n self.logger.info( 'writeTaxonomies: START' )\n\n self.logger.info( 'writeTaxonomies: keggreader.getAllOrganisms(): START' )\n\n organisms = self.reader.getAllOrganisms()\n\n self.logger.info( 'writeTaxonomies: keggreader.getAllOrganisms(): DONE' )\n\n taxonomies = {} \n\n taxonomyFile = self.openInsertFile( 'taxonomiesInsert.psql' )\n\n self.logger.info( 'writeTaxonomies: We got ' + str(len(organisms)) + ' organisms and our insert file is taxonomiesInsert.psql' )\n\n\n for organism,taxonomyData in organisms.iteritems():\n for tax in taxonomyData['lineage']:\n\n taxonomies[ tax['name'] ] = { 'name': tax['name'], 'tax_id': tax['tax_id'], 'type': tax['type'] } \n\n\n self.logger.info( 'writeTaxonomies: We got ' + str(len(taxonomies)) + ' taxonomies.' )\n\n\n for taxonomy,taxData in taxonomies.iteritems():\n taxonomyInserted = self.writeFile( taxonomyFile, 'taxonomies', [ str(taxData['name']), str(taxData['tax_id']), str(taxData['type']) ] )\n self.taxonomiesInserted[ taxData['name'] ] = taxonomyInserted\n\n self.logger.info( 'writeTaxonomies: DONE' )",
"def write_hier_all(self, out=sys.stdout,\n len_dash=1, max_depth=None, num_child=None, short_prt=False):\n # Print: [biological_process, molecular_function, and cellular_component]\n for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']:\n self.write_hier(go_id, out, len_dash, max_depth, num_child, short_prt, None)",
"def write_t_rec(self, t, autoAnchor=None, align='left', parent=None, level=0):\n if parent is None:\n parent = self.temp_div # Dummy div\n current = E.P()\n parent.append(current)\n else:\n current = parent\n if t.text:\n if \"anchor\" in t.attrib:\n a = E.A(name=t.attrib[\"anchor\"])\n a.tail = t.text\n current.append(a)\n else:\n current.text = t.text\n if autoAnchor:\n current.attrib['id'] = autoAnchor\n for child in t:\n if child.tag in ['xref', 'eref', 'iref', 'cref', 'spanx']:\n for element in self._expand_ref(child):\n current.append(element)\n elif child.tag == 'u':\n for element in self._expand_u(child):\n current.append(element)\n elif child.tag == 'vspace':\n br = E.BR()\n current.append(br)\n blankLines = int(child.attrib.get('blankLines',\n self.defaults['vspace_blanklines']))\n for i in range(blankLines):\n br = E.BR()\n current.append(br)\n if child.tail:\n br.tail = child.tail\n elif child.tag == 'list':\n self.write_list(child, parent, level=level)\n if child.tail:\n parent.append(E.P(child.tail))\n elif child.tag == 'figure':\n # Callback to base writer method\n self.write_figure(child)\n elif child.tag == 'texttable':\n # Callback to base writer method\n self.write_table(child)\n # If we are back at top level, serialize the whole temporary structure\n # Add to body buffer\n if parent == self.temp_div:\n self.buf.append(self._flush_temp_div())",
"def write(self,fout,line):\n \n def trans(a,m):\n if a == '0': return m[0]+'\\t'+m[0]\n if a == '1': return m[0]+'\\t'+m[1]\n if a == '2': return m[1]+'\\t'+m[1]\n return '0\\t0'\n\n if not line: return\n animal = line[0]\n if animal in self.ped:\n father,mother = self.ped[animal]['father'],self.ped[animal]['mother']\n sex = self.ped[animal]['sex']\n phe = self.ped[animal]['phe']\n family = self.ped[animal]['family'][0]\n else:\n father,mother,sex,phe,family = '0','0','3','-9','0'\n if len(self.mark['marklist']) > 0:\n lmark = self.mark['marklist']\n fout.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n' % (family,animal,father,mother,sex,phe,\n '\\t'.join([trans(line[i+1],self.mark[name]['a1']+self.mark[name]['a2']) for i,name in enumerate(lmark)])))\n else:\n fout.write('%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n' % (family,animal,father,mother,sex,phe,'\\t'.join(line[1:])))",
"def create_hierarchy(self):\n\t\tif self.level is not None:\n\t\t\treturn\n\t\t\n\t\tself.size = 0\n\t\tsubtype = self.subtype.type\n\t\tif subtype.level is None:\n\t\t\tif self.subtype.size == 0:\n\t\t\t\traise ParserException(\"Loop in the definition of '%s' and '%s' detected!\" % (self.name, self.subtype.name))\n\t\t\tsubtype.create_hierarchy()\n\t\t\n\t\tself.level = subtype.level + 1\n\t\tself.size = subtype.size",
"def write(self, entry):\n if entry is \"all\":\n for k, item in self.nodes.items():\n if type(item) is list:\n for node in item:\n node.write()\n else:\n item.write()\n else:\n # other option is 'rooms'\n for node in self.nodes[entry]:\n node.write()",
"def write_tree(tree: dict, path: str) -> None:\n with open(path, mode=\"w\", encoding=\"utf-8\") as file_object:\n file_object.write(json.dumps(tree, indent=4))",
"def make_drs_tree(self):\n pass",
"def addtree(self, dct) -> None:\n namelst = dct['name'].split('\\\\')\n # print('nlst {}'.format(namelst))\n n_n = self\n for curname in namelst:\n nextlevel = n_n.child_dct.get(curname, None)\n if nextlevel is None:\n nextlevel = n_n.child_dct[curname] = LocNode(curname)\n n_n = nextlevel\n n_n.setval(dct)",
"def write(self, values):\n TYPE = self.env['anytracker.ticket.type']\n children = None\n if 'parent_id' in values:\n root_id = self.browse(values['parent_id'])._get_root().id\n values['project_id'] = root_id\n for ticket in self:\n if ticket.id == values['parent_id']:\n raise except_orm(\n _('Error'),\n _(u\"Think of yourself. Can you be your own parent?\"))\n # if reparenting to False,\n # propagate the current ticket as project for children\n project_id = root_id or ticket.id\n # set the project_id of me and all the children\n children = self.search([('id', 'child_of', ticket.id)])\n super(Ticket, children).write({'project_id': project_id})\n self.browse(values['parent_id']).recompute_subtickets()\n if 'active' in values:\n for ticket in self:\n children = self.search([\n ('id', 'child_of', ticket.id),\n ('active', '=', not values['active'])])\n super(Ticket, children).write({'active': values['active']})\n\n if 'participant_ids' in values:\n if len(self) > 1:\n raise except_orm(\n _('Error !'),\n _('You can modify participants for 1 ticket at a time'))\n participant_ids = set(self.participant_ids.ids)\n # replace ticket numbers with permalinks\n if 'description' in values:\n values['description'] = add_permalinks(\n self.env.cr.dbname, values['description'])\n\n # don't allow to set a node as ticket if it has children\n if values.get('type'):\n type_id = values['type']\n for ticket in self:\n if ticket.child_ids and not TYPE.browse(type_id).has_children:\n del values['type']\n\n res = super(Ticket, self).write(values)\n if 'parent_id' in values:\n for ticket in self:\n method_id = (ticket.parent_id.method_id.id\n if values['parent_id'] is not False\n else ticket.method_id.id)\n super(Ticket, children).write({'method_id': method_id})\n # correct the parent to be a node\n if 'parent_id' in values:\n types = TYPE.search([('code', '=', 'node')])\n if types:\n self.browse(values['parent_id']).write({'type': types[0].id})\n\n if 'participant_ids' in values:\n # subscribe new participants, unsubscribe old ones\n new_p_ids = set(self.participant_ids.ids)\n added_users = new_p_ids - participant_ids\n removed_users = participant_ids - new_p_ids\n self.message_unsubscribe_users(removed_users)\n self.message_subscribe_users(added_users)\n # Needed for the ir_rule,\n # because it involves an sql request for _search_allowed_partners\n self.env.invalidate_all()\n\n return res",
"def createHierarchy(self, hierarchy):\n self.tprint('create_bd_cell -type hier ' + hierarchy)",
"def _populate_terms(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n # Make parents and relationships references to the actual GO terms.\n for rec in self.values():\n # Given parent GO IDs, set parent GO Term objects\n rec.parents = set([self[goid] for goid in rec._parents])\n\n # For each parent GO Term object, add it's child GO Term to the children data member\n for parent_rec in rec.parents:\n parent_rec.children.add(rec)\n\n if has_relationship:\n self._populate_relationships(rec)",
"def write(self, fout: BinaryIO, dflevel: Level, _: Any) -> None:\n with DFWriter(fout, noclose=True) as writer:\n writer.write_level(dflevel)",
"def writeOrganismTaxonomies( self ):\n\n self.logger.info( 'writeOrganismTaxonomies: START' )\n\n organisms = self.reader.getAllOrganisms()\n\n taxonomies = {} \n\n self.logger.info( 'writeOrganismTaxonomies: insert file will be organismTaxonomiesInsert.psql' )\n\n taxonomyFile = self.openInsertFile( 'organismTaxonomiesInsert.psql' )\n\n for organism,taxonomyData in organisms.iteritems():\n for tax in taxonomyData['lineage']:\n\n taxId = self.taxonomiesInserted[ tax['name'] ] \n organismId = self.importerOrganism.organismsInserted[ organism ] \n\n self.writeFile( taxonomyFile, 'organism_taxonomies', [ str(organismId), str(taxId) ] )\n\n\n self.logger.info( 'writeOrganismTaxonomies: DONE' )",
"def write_nml(self, nml_write_path):\n\n # If the object does not have any trees, construct an empty tree before writing to enable webKnossos import\n if self.num_trees() == 0:\n self.add_tree()\n\n nml = self._skeleton_to_nml()\n with open(nml_write_path, \"wb\") as f:\n wknml.write_nml(f, nml)",
"def make_forest (self, doclets):\n\n for o in doclets:\n if o.memberof is None:\n continue\n if o.memberof in self.longnames:\n o.parent = self.longnames[o.memberof]\n o.parent.children.append (o)\n continue\n if o.doc ():\n if o.memberof == '<anonymous>':\n o.error (\"\"\"Could not link up object %s to %s.\n Try giving the anonymous object an @alias.\"\"\"\n % (o.longname, o.memberof))\n else:\n o.error (\"Could not link up object %s to %s\" % (o.longname, o.memberof))",
"def write (self, path):\n\t\ts=[]; add=s.append\n\t\tadd ('\\t'.join (self.schema))\n\t\tfor record in self.data:\n\t\t\tadd (record.asTabDelimitedRecord())\n\t\t\n\t\t# f = open (path, 'w')\n\t\tf = codecs.open(path, 'w', 'utf-8')\n\t\tf.write (self.linesep.join (s))\n\t\tf.close()\n\t\tprint (\"data written to \" + path)",
"def write(self, taxonomy, output_file):\n\n fout = open(output_file, 'w')\n for genome_id, taxa in taxonomy.items():\n fout.write(genome_id + '\\t' + ';'.join(taxa) + '\\n')\n fout.close()",
"def create_taxonomy(dataset_name, attr, dataset=[]):\n #path = os.getcwd()\n\n path_in = os.getcwd()\n pattern = '^.*/thesis-data-anonymisation/'\n path_top = re.search(pattern, path_in).group(0)\n\n path = path_top +'data'\n\n if len(dataset_name) > 0:\n prefix = '../data/'+dataset_name+'/hierarchy_'\n else:\n prefix = '../data/hierarchy_'\n\n postfix = '.csv'\n\n try:\n file = open(path + '/' + prefix + attr + postfix, 'r')\n except FileNotFoundError:\n if len(dataset_name) > 0:\n prefix = '/data/'+dataset_name+'/hierarchy_'\n else:\n prefix = '/data/hierarchy_'\n file = open(path+prefix + attr + postfix, 'r')\n\n taxonomy = {}\n #dataset_group = dataset.groupby(attr).groups\n\n lines_in = file.readlines()\n file.close()\n lines = [line.strip().split(';') for line in lines_in]\n max_height = max([len(line) for line in lines])\n try:\n float(lines[0][0])\n is_numeric = True\n except ValueError:\n is_numeric = False\n for line in lines:\n #try:\n # if is_numeric:\n # dataset_group[int(line[0])]\n # else:\n # dataset_group[line[0]]\n #except KeyError:\n # continue\n line.reverse()\n for i, val in enumerate(line):\n is_leaf = False\n if val == '*':\n node = TaxNode(val, None, is_numeric, is_leaf)\n else:\n if i == len(line) - 1:\n is_leaf = True\n\n node = TaxNode(val, taxonomy[line[i - 1]][-1], is_numeric, is_leaf)\n try:\n current_nodes = taxonomy[val]\n already_added = False\n for current_node in current_nodes:\n if current_node.parent is None:\n already_added = True\n elif current_node.parent.value == node.parent.value:\n already_added = True\n if not already_added:\n taxonomy[val].append(node)\n except KeyError:\n taxonomy[val] = [node] # Saves the nodes in a list in case of several parents (only valid for nodes with several parents!!!)\n hierarchy = Taxonomy(taxonomy, max_height)\n\n return hierarchy",
"def write(self, fout: BinaryIO, dflevel: Level, level_data: Any) -> None:\n region_offsets, region_data = level_data\n with DFWriter(fout, noclose=True) as writer:\n writer.write_level_ex(dflevel, region_offsets, region_data)",
"def AddTreeTerm(con, cur, termid, parentid, ontologynameid, commit=True):\n try:\n # test if already exists\n cur.execute('SELECT uniqueId FROM OntologyTreeStructureTable WHERE (ontologyId=%s AND ontologyParentId=%s AND ontologyNameId=%s) LIMIT 1', [termid, parentid, ontologynameid])\n if cur.rowcount > 0:\n sid = cur.fetchone()[0]\n debug(2, 'Tree entry exists (%d). returning it' % sid)\n return '', sid\n # does not exist - lets add it\n cur.execute('INSERT INTO OntologyTreeStructureTable (ontologyId,ontologyParentId,ontologyNameId) VALUES (%s,%s,%s) RETURNING uniqueId', [termid, parentid, ontologynameid])\n sid = cur.fetchone()[0]\n return '', sid\n except psycopg2.DatabaseError as e:\n debug(7, \"error %s enountered in ontology.AddTreeTerm\" % e)\n return \"error %s enountered in ontology.AddTreeTerm\" % e, -2",
"def add_sub_hierarchy(outdoc, type_hierarchy, depth=0, indent_step=' '):\n for k, v in type_hierarchy.items():\n type_list_item = indent_step*depth + '* '\n type_list_item += outdoc.get_reference(RSTSectionLabelHelper.get_section_label(k), k)\n type_list_item += outdoc.newline\n outdoc.add_text(type_list_item)\n if len(v) > 0:\n outdoc.add_text(outdoc.newline)\n add_sub_hierarchy(outdoc=outdoc,\n type_hierarchy=v,\n depth=depth+1,\n indent_step=indent_step)",
"def _write(self, out_file):\n #\n # I know this function is long, but the FRD block is long as well...\n # Splitting this into multiple functions would not help in my opinion.\n # Therefore -> shut up pylint\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n #\n out_file.write(' '.encode()) # pad byte\n out_file.write('{:4d}'.format(self.key).encode())\n out_file.write(self.code.encode())\n out_file.write(self.setname.ljust(6).encode())\n out_file.write('{:12.5E}'.format(self.value).encode())\n out_file.write('{:12d}'.format(self.numnod).encode())\n out_file.write(self.text.ljust(20).encode())\n out_file.write('{:2d}'.format(self.ictype).encode())\n out_file.write('{:5d}'.format(self.numstep).encode())\n out_file.write(self.analys.ljust(10).encode())\n out_file.write('{:2d}'.format(self.format).encode())\n out_file.write('\\n'.encode())\n\n out_file.write(' '.encode()) # pad byte\n out_file.write('-4'.encode()) # key = -4\n out_file.write((' '*2).encode()) # pad bytes\n out_file.write(self.name.ljust(8).encode())\n if self.entities[0].ictype == 2 and self.ncomps == 3:\n out_file.write('{:5d}'.format(self.ncomps + 1).encode())\n else:\n out_file.write('{:5d}'.format(self.ncomps).encode())\n out_file.write('{:5d}'.format(self.irtype).encode())\n out_file.write('\\n'.encode()) # eol\n\n for entity in self.entities:\n out_file.write(' '.encode()) # pad byte\n out_file.write('-5'.encode())\n out_file.write((' '*2).encode()) # pad bytes\n out_file.write(entity.name.ljust(8).encode())\n out_file.write('{:5d}'.format(entity.menu).encode())\n out_file.write('{:5d}'.format(entity.ictype).encode())\n out_file.write('{:5d}'.format(entity.icind1).encode())\n if entity.ictype == 4:\n out_file.write('{:5d}'.format(entity.icind2).encode())\n elif entity.ictype == 2 and entity is self.entities[-1]:\n out_file.write('{:5d}'.format(entity.icind2).encode())\n out_file.write('{:5d}'.format(entity.iexist).encode())\n out_file.write(entity.icname.encode())\n else:\n out_file.write('{:5d}'.format(entity.iexist).encode())\n out_file.write('\\n'.encode()) # eol\n\n for result in self.results:\n if self.format < 2:\n num_lines = int(self.ncomps/(6 + 1)) + 1\n for j in range(num_lines):\n if j == 0:\n out_file.write(' -1'.encode()) # pad byte and key = -1\n if self.format == 0:\n out_file.write(\n '{:5d}'.format(result.node).encode())\n else:\n out_file.write(\n '{:10d}'.format(result.node).encode())\n else:\n out_file.write(' -2'.encode()) # pad byte and key = -2\n out_file.write(' '*(5*(self.format+1)).encode())\n k_start = j*6\n k_end = min(self.ncomps - k_start, (j+1)*6)\n for k in range(k_start, k_end):\n out_file.write(\n '{:12.5E}'.format(result.data[k]).encode())\n out_file.write('\\n'.encode()) # eol\n else:\n out_file.write(struct.pack('i', result.node))\n out_file.write(struct.pack('f'*self.ncomps, *result.data))\n\n if self.format < 2:\n out_file.write(' -3\\n'.encode()) # last record for ascii only",
"def _walk(self, level=0):\n l_dict = self.list_all()\n indent = level * \" \"\n for node in l_dict[\"nodes\"]:\n print(indent + \"node\", node)\n for group in l_dict[\"groups\"]:\n print(indent + \"group: \", group)\n with self.open(group) as hdf_group:\n hdf_group._walk(level=level + 1)",
"def _set_level_depth(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n\n def _init_level(rec):\n if rec.level is None:\n if rec.parents:\n rec.level = min(_init_level(rec) for rec in rec.parents) + 1\n else:\n rec.level = 0\n return rec.level\n\n def _init_depth(rec):\n if rec.depth is None:\n if rec.parents:\n rec.depth = max(_init_depth(rec) for rec in rec.parents) + 1\n else:\n rec.depth = 0\n return rec.depth\n\n def _init_reldepth(rec):\n if not hasattr(rec, 'reldepth'):\n up_terms = rec.get_goterms_upper()\n if up_terms:\n rec.reldepth = max(_init_reldepth(rec) for rec in up_terms) + 1\n else:\n rec.reldepth = 0\n return rec.reldepth\n\n for rec in self.values():\n\n # Add invert relationships\n if has_relationship:\n if rec.depth is None:\n _init_reldepth(rec)\n\n # print(\"BBBBBBBBBBB1\", rec.id, rec.relationship)\n #for (typedef, terms) in rec.relationship.items():\n # invert_typedef = self.typedefs[typedef].inverse_of\n # # print(\"BBBBBBBBBBB2 {} ({}) ({}) ({})\".format(\n # # rec.id, rec.relationship, typedef, invert_typedef))\n # if invert_typedef:\n # # Add inverted relationship\n # for term in terms:\n # if not hasattr(term, 'relationship'):\n # term.relationship = defaultdict(set)\n # term.relationship[invert_typedef].add(rec)\n # print(\"BBBBBBBBBBB3\", rec.id, rec.relationship)\n\n if rec.level is None:\n _init_level(rec)\n\n if rec.depth is None:\n _init_depth(rec)"
]
| [
"0.5546483",
"0.5536881",
"0.5504827",
"0.5468284",
"0.5443513",
"0.5434736",
"0.5344939",
"0.5320528",
"0.53101194",
"0.52966833",
"0.5266934",
"0.5233876",
"0.52310437",
"0.5212241",
"0.51944035",
"0.51603746",
"0.51283133",
"0.5121976",
"0.5108549",
"0.5087461",
"0.5085598",
"0.50682807",
"0.50194126",
"0.5002256",
"0.4964945",
"0.4955021",
"0.4954288",
"0.4944657",
"0.49364227",
"0.49364117"
]
| 0.56535333 | 0 |
Read obo file. Store results. | def load_obo_file(self, obo_file, optional_attrs, load_obsolete, prt):
reader = OBOReader(obo_file, optional_attrs)
# Save alt_ids and their corresponding main GO ID. Add to GODag after populating GO Terms
alt2rec = {}
i = 0
for rec in reader:
# Save record if:
# 1) Argument load_obsolete is True OR
# 2) Argument load_obsolete is False and the GO term is "live" (not obsolete)
if load_obsolete or not rec.is_obsolete:
self[rec.id] = rec
for alt in rec.alt_ids:
alt2rec[alt] = rec
# Save the typedefs and parsed optional_attrs
# self.optobj = reader.optobj
self.typedefs = reader.typedefs
self._populate_terms(reader.optobj)
self._set_level_depth(reader.optobj)
# Add alt_ids to go2obj
for goid_alt, rec in alt2rec.items():
self[goid_alt] = rec
desc = self._str_desc(reader)
if prt is not None:
prt.write("{DESC}\n".format(DESC=desc))
return desc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read(self, fileobj):\n raise NotImplementedError",
"def read(self):",
"def _read_data(self):",
"def __iter__(self):\n # Wait to open file until needed. Automatically close file when done.\n with open(self.obo_file) as fstream:\n rec_curr = None # Stores current GO Term\n typedef_curr = None # Stores current typedef\n for line in fstream:\n # obo lines start with any of: [Term], [Typedef], /^\\S+:/, or /^\\s*/\n if self.data_version is None:\n self._init_obo_version(line)\n if rec_curr is None and line[0:6].lower() == \"[term]\":\n rec_curr = GOTerm()\n if self.optobj:\n self.optobj.init_datamembers(rec_curr)\n elif typedef_curr is None and line[0:9].lower() == \"[typedef]\":\n typedef_curr = TypeDef()\n elif rec_curr is not None or typedef_curr is not None:\n line = line.rstrip() # chomp\n if line:\n self._add_to_obj(rec_curr, typedef_curr, line)\n else:\n if rec_curr is not None:\n yield rec_curr\n rec_curr = None\n elif typedef_curr is not None:\n # Save typedef.\n self.typedefs[typedef_curr.id] = typedef_curr\n typedef_curr = None\n # Return last record, if necessary\n if rec_curr is not None:\n yield rec_curr",
"def read():\n # TODO",
"def read_all(self):\r\n pass",
"def read_stock(db, openfile):\n pass",
"def read(self, label):\n # XXX I think we should redo the concept of 'restarting'.\n # It makes sense to load a previous calculation as\n #\n # * static, calculator-independent results\n # * an actual calculator capable of calculating\n #\n # Either of which is simpler than our current mechanism which\n # implies both at the same time. Moreover, we don't need\n # something like calc.read(label).\n #\n # What we need for these two purposes is\n #\n # * calc = MyCalculator.read(basefile)\n # (or maybe it should return Atoms with calc attached)\n # * results = read_results(basefile, format='abinit')\n #\n # where basefile determines the file tree.\n FileIOCalculator.read(self, label)\n self.atoms, self.parameters = io.read_ase_and_abinit_inputs(self.label)\n self.results = io.read_results(self.label, self._output_filename())",
"def read_from_file():\n global REPOS\n with file(OUTPUT, 'r') as infile:\n REPOS = json.loads(infile.read())",
"def read(self):\n pass",
"def load_OM_outputs(file_path):\r\n # Transform the .xls database into panda type\r\n excel = pd.ExcelFile(file_path)\r\n\r\n # Collect data from a particular tab\r\n OM_outputs = excel.parse('OM', header=0, index_col=0)\r\n\r\n return OM_outputs",
"def read_locations(db, openfile):\n pass",
"def handle_file(filename,operation = 'r'):\n with open(filename,operation) as f:\n data = f.readlines()\n return data",
"def read(self, filename):\n pass",
"def read(self, filename):\n pass",
"def _load_closed_orbits(self, filename):\n fin = open(filename)\n closed_orbits = [json.loads(line) for line in fin.readlines()]\n self._get_x(closed_orbits)\n self._get_t(closed_orbits)\n self.energy_list = sorted(self.closed_orbits_t.keys())",
"def _load_closed_orbits(self, filename):\n fin = open(filename)\n closed_orbits = [json.loads(line) for line in fin.readlines()]\n self.closed_orbits_cached = closed_orbits",
"def Open(self, file_object):",
"def readobject(filename):\n # import cPickle as pickle\n with open(filename, 'rb') as input_file:\n return pickle.load(input_file)",
"def _read_recs(basedir):\n for borotag in boro_tags:\n datafile = \"%s/%s.csv\" % (basedir,borotag)\n print(\"slurp '%s' ..\" % datafile)\n recs = read_recs(datafile)\n yield from (pluto.parse.normalize(r) for r in recs)",
"def __loadFromFile(self):\n try:\n f=open(self.__fileR, \"r\")\n line =f.readline().strip()\n rez=[]\n while line!=\"\":\n attrs=line.split(\",\")\n rt=Rent(attrs[0], attrs[1], attrs[2], attrs[3])\n rez.append(rt)\n line=f.readline().strip()\n f.close()\n return rez\n #the file cannot be reached\n except IOError:\n return None",
"def _load_obcfile(casename=None): \n\n data={}\n\n if casename==None:\n print('_load_obcfile requires a filename to load.')\n return\n try:\n fp=open(casename+'_obc.dat','r')\n except IOError:\n print('_load_obcfile: invalid case name.')\n return data\n\n obc_str=fp.readline().split('=')\n obc_num=int(obc_str[1])\n t_data1=np.genfromtxt(casename+'_obc.dat',skip_header=1)\n fp.close()\n\n data['obcf_num']=obc_num\n data['obcf_numbers']=t_data1[:,0]\n data['obcf_nodes']=t_data1[:,1]\n data['obcf_value']=t_data1[:,2]\n\n \n return data",
"def read(self,end_date=None):\n # These 2 lines read the Marine archive file header. The first line\n # contains variable names; the second contains a decimal scale factor\n # for each\n eof = False\n #file_header = self._marine_reader.next()[4:]\n #decscale_header = self._marine_reader.next()[4:]\n file_header = next(self._marine_reader)[4:]\n decscale_header = next(self._marine_reader)[4:]\n decscale_header.pop()\n decscale = [10.0**float(s) for s in decscale_header] # Convert to actual scale floats\n self.observations = file_header\n self.observations.pop() # Remove extra column\n self.observations = strip_array(self.observations)\n\n # Add TYPE and TIME. These values are not in the NDBC Monthly Obs file.\n self.observations.append('TYPE')\n self.observations.append('TIME')\n\n # Iterate over every row (ob) for a given hour; get and catalog the station name\n # of the ob. If it exists in the user input station list, then capture the obs.\n idate = 0\n self.obs_time = 0\n station_list_check = []\n for row in self._marine_reader:\n\n # Conditions to break the loop\n if row[0] == '99999999':\n break\n idate = (int(row[0])*100)+int(row[1])\n if idate > int(end_date):\n # If we are here, we are done reading, but we still need to\n # check for missing obs from the last date.\n self.check_missing_obs(station_list_check)\n break\n if idate > int(self.obs_time):\n # Here means we are at a new date\n if int(self.obs_time) > 0:\n self.check_missing_obs(station_list_check)\n logging.info(\"READING MARINE OBS FOR DATE \"+str(idate))\n station_list_check = []\n\n # Get some information from the row (observation)\n name = row[3].strip() # Get the station name\n station_list_check.append(name)\n self.obs_time = str(idate)\n obs_hour = row[1] # Get the hour of the obs before removing items\n row = row[4:] # Remove elements 0-3\n row.pop() # Remove last empty element\n\n # Apply decimal scale factor. IMPORTANT: Some variables need to be\n # converted to int, then back to string. NumPy cannot convert a\n # float as a string and cast as in int so we do that here.\n for i,(obname,ob,ds) in enumerate(zip(self.observations,row,decscale)):\n if int(ob) != 9999:\n if obname in ['AWPD','DWPD','TEMP','WDIR','WGST','WTMP','WVDR','WVHT']:\n row[i] = str(int(float(ob)*ds))\n else:\n row[i] = str(float(ob)*ds)\n\n # Add TYPE and TIME values for each hourly observation.\n row.append('MARI')\n row.append(obs_hour+'00')\n\n # Added the station observation to the marinereader object.\n if name in list(self.station_list.keys()):\n self.station_list[name].add_record(self.observations,\n row,\n self.obs_time)",
"def __init__(self, obo_file=OBO_FILE, optional_attrs=None):\n self.optobj = self._init_optional_attrs(optional_attrs) # OboOptionalAttrs or None\n self.format_version = None # e.g., \"1.2\" of \"format-version:\" line\n self.data_version = None # e.g., \"releases/2016-07-07\" from \"data-version:\" line\n self.typedefs = {}\n\n # True if obo file exists or if a link to an obo file exists.\n print(\"obo_file:\")\n print(obo_file)\n if os.path.isfile(obo_file):\n self.obo_file = obo_file\n # GOTerm attributes that are necessary for any operations:\n else:\n raise Exception(\"COULD NOT READ({OBO})\\n\"\n \"download obo file first\\n \"\n \"[http://geneontology.org/ontology/\"\n \"go-basic.obo]\".format(OBO=obo_file))",
"def read(self, file, path):\n pos, = struct.unpack('<Q', file.read(8))\n if pos == 0:\n raise VergeMLError(\"Invalid cache file: {}\".format(path))\n file.seek(pos)\n self.index, self.meta, self.info = pickle.load(file)",
"def read_all(self):\n if (not self.exists()):\n raise IOError(\"File at '{}' does not exist.\".format(self.location))\n with open(self.location, 'r') as f:\n return f.read()",
"def readOneData(self):\n\t\tpass",
"def test_load_file_contents():\n\n file_name = 'test_fooof_all'\n loaded_data = load_json(file_name, TEST_DATA_PATH)\n\n # Check settings\n for setting in OBJ_DESC['settings']:\n assert setting in loaded_data.keys()\n\n # Check results\n for result in OBJ_DESC['results']:\n assert result in loaded_data.keys()\n\n # Check results\n for datum in OBJ_DESC['data']:\n assert datum in loaded_data.keys()",
"def read_results(self):\n\n myfile = open(os.path.join(self.directory, 'results.tag'), 'r')\n self.lines = myfile.readlines()\n myfile.close()\n\n # print('atoms before read', self.atoms)\n # print('atoms_input before read', self.atoms_input)\n\n self.atoms = self.atoms_input\n\n charges, energy, free_energy = self.read_charges_and_energy()\n if charges is not None:\n self.results['charges'] = charges\n\n self.results['energy'] = energy\n self.results['free_energy'] = free_energy\n\n if self.do_forces:\n forces = self.read_forces()\n self.results['forces'] = forces\n\n self.mmpositions = None\n\n # stress stuff begins\n sstring = 'stress'\n have_stress = False\n stress = list()\n for iline, line in enumerate(self.lines):\n if sstring in line:\n have_stress = True\n start = iline + 1\n end = start + 3\n for i in range(start, end):\n cell = [float(x) for x in self.lines[i].split()]\n stress.append(cell)\n if have_stress:\n stress = -np.array(stress) * Hartree / Bohr**3\n self.results['stress'] = stress.flat[[0, 4, 8, 5, 2, 1]]\n # stress stuff ends\n\n # TODO: these two seem wrong with DFTB+ master but compatible with 19.1\n # eigenvalues and fermi levels\n #fermi_levels = self.read_fermi_levels()\n #if fermi_levels is not None:\n # self.results['fermi_levels'] = fermi_levels\n #\n #eigenvalues = self.read_eigenvalues()\n #if eigenvalues is not None:\n # self.results['eigenvalues'] = eigenvalues\n\n # calculation was carried out with atoms written in write_input\n os.remove(os.path.join(self.directory, 'results.tag'))",
"def read_all(self, *args, **kwargs):\n pass"
]
| [
"0.6180303",
"0.5986327",
"0.5855927",
"0.57343435",
"0.5720834",
"0.57050353",
"0.5640002",
"0.55223525",
"0.5502251",
"0.54979974",
"0.54961646",
"0.54932064",
"0.5422811",
"0.5407113",
"0.5407113",
"0.53859",
"0.5364427",
"0.53560793",
"0.5344812",
"0.5327998",
"0.53029215",
"0.53016734",
"0.5297577",
"0.5287595",
"0.52727747",
"0.5272167",
"0.52552074",
"0.52534795",
"0.5249637",
"0.52494085"
]
| 0.6070833 | 1 |
Convert GO IDs to GO Term record objects. Populate children. | def _populate_terms(self, optobj):
has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs
# Make parents and relationships references to the actual GO terms.
for rec in self.values():
# Given parent GO IDs, set parent GO Term objects
rec.parents = set([self[goid] for goid in rec._parents])
            # For each parent GO Term object, add its child GO Term to the children data member
for parent_rec in rec.parents:
parent_rec.children.add(rec)
if has_relationship:
self._populate_relationships(rec) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _populate_relationships(self, rec_curr):\n for relationship_type, goids in rec_curr.relationship.items():\n parent_recs = set([self[goid] for goid in goids]) \n rec_curr.relationship[relationship_type] = parent_recs # replace GO ID with GO Term record object\n for parent_rec in parent_recs:\n if relationship_type not in parent_rec.relationship_rev:\n parent_rec.relationship_rev[relationship_type] = set([rec_curr])\n else:\n parent_rec.relationship_rev[relationship_type].add(rec_curr)",
"def _subnode_ids(self):\n for ticket in self:\n ticket.subnode_ids = self.search([\n ('parent_id', '=', ticket.id),\n ('type.has_children', '=', True)])",
"def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n '(SELECT subject_id '\n 'FROM cvterm_relationship '\n 'WHERE object_id = %s '\n 'UNION '\n 'SELECT cr.subject_id '\n 'FROM cvterm_relationship cr '\n 'INNER JOIN children ch ON ch.subject_id = cr.object_id) '\n 'SELECT * FROM children')\n ids = connect(child_ids,id,conn)\n list_of_ids = []\n for item in ids:\n list_of_ids.append(item[0])\n return(list_of_ids)",
"def ids_to_tree(self, emb):\n\n tree = TreeData()\n tree.nodes = [] # override the technical root -- the tree will be created including the technical root\n tree.parents = []\n\n # build the tree recursively (start at position 2 to skip the <GO> symbol and 1st opening bracket)\n self._create_subtree(tree, -1, emb, 2)\n return tree",
"def ids_to_tree(self, emb, postprocess=True):\n\n tree = TreeData()\n tokens = self.ids_to_strings(emb)\n\n for token in tokens:\n if token in ['<GO>', '<STOP>', '<VOID>']:\n continue\n tree.create_child(0, len(tree), NodeData(token, 'x'))\n\n return tree",
"def _process_nodes(self):\n # Sort the nodes by metanode type, then by id\n self.node_df = self.node_df.sort_values(['label', 'id']).reset_index(drop=True)\n # Get all the ids\n self.nodes = self.node_df['id']\n # Get mapping from the index to the node ID (one to many so need different one for each node type)\n self.index_to_nid = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.index_to_nid[group_name] = group['id'].reset_index(drop=True).to_dict()\n # Get the reverse mapping (many to one so don't need to separate based on type).\n self.nid_to_index = dict()\n for mapper in self.index_to_nid.values():\n for index, nid in mapper.items():\n self.nid_to_index[nid] = index\n # Finally, we need a mapper from id to node type\n self.id_to_metanode = self.node_df.set_index('id')['label'].to_dict()\n # And from node type to a list of ids\n self.metanode_to_ids = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.metanode_to_ids[group_name] = group['id'].tolist()\n # One more mapper of id to name\n self.nid_to_name = self.node_df.set_index('id')['name'].to_dict()",
"def get_descendant_objective_id_terms(self):\n return # osid.search.terms.IdTerm",
"def create_seq_objs(self):\n sorted_gene_codes = sorted(list(self.gene_codes), key=str.lower)\n our_taxon_names = self.get_taxon_names_for_taxa()\n all_seqs = self.get_all_sequences()\n\n for gene_code in sorted_gene_codes:\n for code in self.voucher_codes:\n seq_obj = self.build_seq_obj(code, gene_code, our_taxon_names, all_seqs)\n if seq_obj is None:\n self.warnings += ['Could not find voucher {0}'.format(code)]\n continue\n self.seq_objs.append(seq_obj)",
"def __init__(self, id, left, right, parent, depth, kind, text, relname, relkind):\n\n\t\tself.id = id\n\t\tself.parent = parent\n\t\tself.left = left\n\t\tself.right = right\n\t\tself.depth = depth\n\t\tself.kind = kind #edu, multinuc or span node\n\t\tself.text = text #text of an edu node; empty for spans/multinucs\n\t\tself.token_count = text.count(\" \") + 1\n\t\tself.relname = relname\n\t\tself.relkind = relkind #rst (a.k.a. satellite), multinuc or span relation\n\t\tself.sortdepth = depth\n\t\tself.children = []\n\t\tself.leftmost_child = \"\"\n\t\tself.dep_parent = \"\"\n\t\tself.dep_rel = relname\n\t\tself.tokens = []\n\t\tself.parse = \"\"",
"def __init__(self, node_id):\n # Assign ID and update class-counter\n self.id = node_id\n\n # Initialize\n self.is_sequence_end = False\n self.children = {}",
"def collect_children_by_id(self):\n self.children_by_id = {}\n self.root_by_id = {}\n self.ns_for_root_id = {}\n\n def recursive_fill_root_id(entry):\n root_id = self.root_by_id.get(entry.mount_id)\n if root_id is not None:\n return root_id\n\n if entry.parent_id == entry.mount_id:\n # self-referencing is a root\n root_id = entry.mount_id\n self.root_by_id[root_id] = root_id\n return root_id\n\n parent_entry = self.items.get(entry.parent_id)\n if parent_entry is None:\n # The parent is unknown, so it is an implicit root\n root_id = entry.mount_id\n self.root_by_id[root_id] = root_id\n return root_id\n\n root_id = recursive_fill_root_id(parent_entry)\n self.root_by_id[entry.mount_id] = root_id\n return root_id\n\n for entry in self.items.values():\n if entry.parent_id not in self.children_by_id:\n self.children_by_id[entry.parent_id] = {}\n self.children_by_id[entry.parent_id][entry.mount_id] = entry.abs_mount_point(no_question=True)\n root_id = recursive_fill_root_id(entry)\n if root_id not in self.ns_for_root_id:\n self.ns_for_root_id[root_id] = set()\n self.ns_for_root_id[root_id].add(entry.mount_ns)\n\n # Sanity check\n assert len(self.items) == len(self.root_by_id)",
"def parse_taxonomy( seq_id, lineage, key_dictionary ):\n\tif seq_id in sti_dict:\n\t\ttax_id = sti_dict[ seq_id ]\n\t\ttax_names = [ tax_id ] #list of taxon names\n\telse:\n\t\ttax_id = str( seq_id )\n\t\ttax_names = [ tax_id ] #list of taxon names\n\ttax_numbers = [ seq_id ]\n\tis_A_list = [] #store is_A relationships\n\n\twhile lineage != '1': #forces traversal through the tri file until we get to the root of taxonomy\n\t\t#print lineage\n\t\tif lineage == '0': #need this to process the root in the tri file. \n\t\t\tbreak\n\t\tis_A_list = [lineage] + is_A_list\n\t\ttax_numbers = [lineage] + tax_numbers\n\t\tif lineage in sti_dict: #we have the next taxonomic representative in the sti file\n\t\t\ttax_id = sti_dict[ lineage ]\n\t\t\ttax_names = [tax_id] + tax_names #append tax_id to front of list\n\t\telse: #the taxon does not have a sequence representative. \n\t\t\ttax_id = str( lineage ) \n\t\t\ttax_names = [tax_id] + tax_names\n\t\t#now process to next lineage\n\t\tlineage = tri_dict[ lineage ] \n\n\n\ttax_names = ['root'] + tax_names #append tax_id to front of list\n\ttax_numbers = [lineage] + tax_numbers\n\tis_A_list = ['0'] + [lineage] + is_A_list\n\n\t#now append all of these reuslts to the final dictionary, which will be keyed \n\t#off of the tax_numbers list (unique IDs for each taxonomic level.\n\n\tfor i in xrange( len( tax_numbers ) ):\n\t\tid = tax_numbers[i]\n\t\tif id in key_dictionary:\n\t\t\tpass\n\t\telse:\n\t\t\tparent = is_A_list[i]\n\t\t\tlevel = i #taxonomic level (how far down in levels are we?)\n\t\t\tnames = process_names( tax_names[:i+1] )\n\t\t\tkey_dictionary[ id ] = [ parent, level, names ]\n\n\treturn( key_dictionary )",
"def init_db(self, parent_type, child_type):\n self.parent = Node(self.handler, parent_type)\n self.children = [ Node(self.handler, child_type) for x in range(0, self.SIZE) ]\n for node in self.children:\n Link(self.handler, self.parent.node, node.node, child_type.upper())",
"def _addEntity(self, pid, chunks):\n parent = chunks[pid]\n sub = None\n # Find subject\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n if child.func in SubDict:\n sub = child\n if child.func == \"では\":\n if child.negative != 0 or any([val.negative != 0 for key, val in self.G.successors(child.main)]):\n pass\n else:\n sub = None\n if sub:\n self._addNode(parent, sub=sub.main)\n self._addEdge(sub.main, parent.main, label=\"陳述\", etype=\"stat\")\n else:\n self._addNode(parent)\n \n # Lopp through all children\n for i in range(len(parent.children)):\n child = chunks[parent.children[i]]\n # If child is noun\n if child.func in SubDict:\n if child.func == \"では\":\n if child.negative != 0 or any([val.negative != 0 for key, val in self.G.successors(child.main)]):\n pass\n else:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=child.func, etype=\"attr\")\n elif child.type == 0 and child.func in [\"と\", \"などと\"] and child.id + 1 == parent.id and preprocessText(chunks[parent.parent].main) not in [\"交代\", \"交換\"]:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=\"並列\", etype=\"para\")\n self._addEdge(parent.main, child.main, label=\"並列\", etype=\"para\")\n self.para.append([child.main, parent.main])\n elif child.type == 0 and child.func in ParallelDict and child.id + 1 == parent.id:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=\"並列\", etype=\"para\")\n self._addEdge(parent.main, child.main, label=\"並列\", etype=\"para\")\n self.para.append([child.main, parent.main])\n else:\n self._addNode(child)\n self._addEdge(child.main, parent.main, label=child.func, etype=\"attr\")",
"def _createQuestObjects(questIDs):\n return [questObject(q) for q in questIDs]",
"def fill_taxonomy_database(taxids, password):\r\n\r\n for taxid in taxids:\r\n lineage = ncbi.get_lineage(taxid)\r\n names = ncbi.get_taxid_translator(lineage)\r\n print(lineage)\r\n print([names[taxid] for taxid in lineage])\r\n\r\n previous = \"\"\r\n\r\n for lin in lineage:\r\n if int(lin) != 1: # skipping 'root'\r\n rank = ncbi.get_rank([lin])\r\n SQL_connection = set_connection(password)\r\n cursor = SQL_connection.cursor(buffered=True)\r\n cursor.execute(\r\n \"select * \"\r\n \"from Taxonomie \"\r\n \"where taxonomy_ID = {};\".format(\r\n lin))\r\n results = cursor.fetchone()\r\n if results is None:\r\n if previous == \"\":\r\n cursor.execute(\"insert into Taxonomie \"\r\n \"(rank_up, taxonomy_ID, naam, rang) \"\r\n \"values(NULL, {}, '{}', '{}');\".format(\r\n lin, names[lin], rank[lin]))\r\n SQL_connection.commit()\r\n else:\r\n cursor.execute(\"insert into Taxonomie \"\r\n \"(rank_up, taxonomy_ID, naam, rang) \"\r\n \"values({}, {}, '{}', '{}');\".format(\r\n previous, lin, names[lin], rank[lin]))\r\n SQL_connection.commit()\r\n cursor.close()\r\n SQL_connection.close()\r\n previous = lin",
"def new_branch_tree(tree, ids):\n branch_tree = {}\n branch_tree[\"selftext\"] = tree[\"selftext\"]\n branch_tree[\"title\"] = tree[\"title\"]\n branch_tree[\"id\"] = tree[\"id\"]\n branch_tree[\"comments\"] = {}\n for id in ids[1:]:\n branch_tree[\"comments\"][id] = tree[\"comments\"][id]\n return branch_tree",
"def ids_to_tree(self, emb, postprocess=True):\n\n tree = TreeData()\n tokens = self.ids_to_strings(emb)\n\n for token in tokens:\n if token in ['<GO>', '<STOP>', '<VOID>']:\n continue\n if postprocess:\n # casing (only if set to lowercase)\n if self.lowercase and len(tree) == 1 or tree.nodes[-1].t_lemma in ['.', '?', '!']:\n token = token[0].upper() + token[1:]\n # plural merging (if plural tokens come up)\n if token == '<-s>' and tree.nodes[-1].t_lemma is not None:\n token = self._singular_to_plural(tree.nodes[-1].t_lemma)\n tree.remove_node(len(tree) - 1)\n elif token == '<-s>':\n continue\n\n tree.create_child(0, len(tree), NodeData(token, 'x'))\n\n return tree",
"def build_go_terms(file):\n id_dic = {}\n\n handle = open(file, 'rb')\n attributes = reset_attributes()\n for line in handle:\n posn = line.find(\":\")\n if posn > 0:\n if line[:posn] in attributes:\n try:\n attributes[line[:posn]].append(line.strip()[posn+2:])\n except AttributeError:\n verbalise(\"R\", \"CANNOT APPEND TERM FROM LINE:\", line)\n verbalise(\"R\", \"\\n\".join([str(p) for p in attributes.items()]))\n exit()\n else:\n attributes[line[:posn]] = line.strip()[posn+2:]\n elif line[:2] == \"[T\":\n go_item = set_go_item(attributes)\n if go_item:\n id_dic[go_item.id] = go_item\n attributes = reset_attributes()\n else:\n go_item = set_go_item(attributes)\n if go_item:\n id_dic[go_item.id] = go_item\n\n handle.close()\n return id_dic",
"def make_drs_tree(self):\n pass",
"def _get_child_fields(self, cr, uid, ids, context=None):\n res = self.search(cr, uid, [('parent_id','child_of',ids)], context=context)\n return res",
"def _addChildren(self, pid, chunks):\n if chunks[pid].type in [0, -1]:\n self._addEntity(pid, chunks)\n else:\n self._addPredicate(pid, chunks)",
"def get_children(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_children()\n return set_parents",
"def walk(self, oid):\n oid = str_to_oid(oid)\n result = []\n walker = Walker(self, oid,\n use_bulk=self.settings_read[\"use_bulk\"],\n bulk_rows=self.settings_read[\"bulk_rows\"])\n for rows in walker:\n result.extend(rows)\n return result",
"def get_descendant_agency_id_terms(self):\n return # osid.search.terms.IdTerm",
"def get_descendant_objective_bank_id_terms(self):\n return # osid.search.terms.IdTerm",
"def build(cls, records):\n children = []\n i = 2\n for _ in range(records[0]):\n j, child = cls.build(records[i:])\n i += j\n children.append(child)\n return (i + records[1]), cls(tuple(records[i:i+records[1]]), tuple(children))",
"def _traverse_1_0_1(item, nodes):\n if 'content' in item.keys():\n ids = []\n for node in item['content']:\n nodes[node['id']] = node\n ids.append(node['id'])\n _traverse_1_0_1(node, nodes)\n item['content'] = ids",
"def fix_ids(self, tree: nodes.document) -> None:\n def update_node_id(node: Element) -> None:\n \"\"\"Update IDs of given *node*.\"\"\"\n new_ids: list[str] = []\n for node_id in node['ids']:\n new_id = self.fix_fragment('', node_id)\n if new_id not in new_ids:\n new_ids.append(new_id)\n node['ids'] = new_ids\n\n for reference in tree.findall(nodes.reference):\n if 'refuri' in reference:\n m = self.refuri_re.match(reference['refuri'])\n if m:\n reference['refuri'] = self.fix_fragment(m.group(1), m.group(2))\n if 'refid' in reference:\n reference['refid'] = self.fix_fragment('', reference['refid'])\n\n for target in tree.findall(nodes.target):\n update_node_id(target)\n\n next_node: Node = target.next_node(ascend=True)\n if isinstance(next_node, nodes.Element):\n update_node_id(next_node)\n\n for desc_signature in tree.findall(addnodes.desc_signature):\n update_node_id(desc_signature)",
"def _change_objs_to_IDs(self):\n if self.location:\n self.location = self.location.id\n if self.contents:\n self.contents = [obj.id for obj in self.contents]"
]
| [
"0.5755125",
"0.5338672",
"0.5329722",
"0.52761185",
"0.5210634",
"0.51479983",
"0.5139184",
"0.51210004",
"0.5115062",
"0.5050713",
"0.5016776",
"0.5008475",
"0.5002104",
"0.4972041",
"0.49581307",
"0.49481955",
"0.49471813",
"0.49283394",
"0.49226198",
"0.49141547",
"0.4890956",
"0.48722634",
"0.48681974",
"0.48554885",
"0.48370486",
"0.48327884",
"0.4818019",
"0.4815604",
"0.48137033",
"0.47840825"
]
| 0.7069697 | 0 |
Convert GO IDs in relationships to GO Term record objects. Populate children. | def _populate_relationships(self, rec_curr):
for relationship_type, goids in rec_curr.relationship.items():
parent_recs = set([self[goid] for goid in goids])
rec_curr.relationship[relationship_type] = parent_recs # replace GO ID with GO Term record object
for parent_rec in parent_recs:
if relationship_type not in parent_rec.relationship_rev:
parent_rec.relationship_rev[relationship_type] = set([rec_curr])
else:
parent_rec.relationship_rev[relationship_type].add(rec_curr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _populate_terms(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n # Make parents and relationships references to the actual GO terms.\n for rec in self.values():\n # Given parent GO IDs, set parent GO Term objects\n rec.parents = set([self[goid] for goid in rec._parents])\n\n # For each parent GO Term object, add it's child GO Term to the children data member\n for parent_rec in rec.parents:\n parent_rec.children.add(rec)\n\n if has_relationship:\n self._populate_relationships(rec)",
"def transform_relationships(self, record):\n\n holdings_type = record.get_holdings_type()\n\n relationships = []\n\n # Category Entry (Lane) (R)\n for field in record.get_fields('655'):\n if field.indicator1 in '12':\n rb = RelationshipBuilder()\n\n # Name/Type\n rel_name = \"Category\"\n rb.set_name(rel_name)\n rb.set_type(self.get_relation_type(rel_name))\n\n # Degree\n rb.set_degree({'1': 'primary',\n '2': 'secondary'}.get(field.indicator1))\n\n # Enumeration: n/a\n # Chronology: n/a\n\n # Target\n rb.set_target(self.build_ref_from_field(field, CONCEPT))\n\n # Notes: n/a\n\n relationships.append(rb.build())\n\n\n # Collection/Location/Call Number (R)\n for field in record.get_fields('852'):\n rb = RelationshipBuilder()\n\n # Name/Type\n if 'b' not in field:\n logger.warning(f\"{record.get_control_number()}: loc code ($b) not found: {field}\")\n continue\n loc_code = field['b'].strip(' .').upper()\n rel_name = self.location_code_to_relator_map.get(loc_code, \"Access\")\n rb.set_name(rel_name)\n rb.set_type(self.get_relation_type(rel_name))\n\n # Degree: n/a\n\n # Enumeration\n # if not digital holdings, h/i are enum on rel to Place, else ignore\n if holdings_type != LaneMARCRecord.DIGITAL:\n # just concat??\n enum = ' '.join(field.get_subfields('h','i')).strip()\n rb.set_enumeration(tfcm.build_simple_ref(enum, STRING) if enum else None)\n\n # Chronology: n/a\n\n # Target\n rb.set_target(self.build_ref_from_field(Field('651',' 7',['a',loc_code]), PLACE))\n\n # Notes\n # map ind 1\n # ...\n # ...\n # ...\n for code, val in field.get_subfields('x','z', with_codes=True):\n rb.add_note(val,\n role = \"annotation\" if code == 'x' else \"documentation\")\n\n relationships.append(rb.build())\n\n # Electronic Location And Access (R)\n for field in record.get_fields('856'):\n rb = RelationshipBuilder()\n\n # Name/Type\n rel_name = field['e'] if 'e' in field else \\\n (\"Access\" if field.indicator2 in '01' else \"Related\")\n rb.set_name(rel_name)\n rb.set_type(self.get_relation_type(rel_name))\n\n # Degree: n/a\n # Enumeration: n/a\n # Chronology: n/a\n\n # Notes\n for code, val in field.get_subfields('9','i','r','x', with_codes=True):\n if code == 'x':\n val = \"Date verified: \" + val\n rb.add_note(val,\n role = \"annotation\" if code in 'irx' else \"documentation\")\n\n # Target\n wrb = WorkRefBuilder()\n\n # field should only have one y or z, but do all just in case.\n link_name = ' '.join(field.get_subfields('y','z')).strip()\n if not link_name:\n link_name = 'Link'\n wrb.add_name(link_name)\n wrb.set_link(link_name,\n href_URI = field['u'] )\n\n for val in field.get_subfields('q'):\n # take a guess at the qualifier type\n qualifier_type = Indexer.simple_element_type_from_value(val)\n if qualifier_type is None:\n qualifier_type = STRING\n wrb.add_qualifier(tfcm.build_simple_ref(val, qualifier_type))\n\n rb.set_target(wrb.build())\n\n relationships.append(rb.build())\n\n # Uniform Title Associated with Version (Lane) (R)\n for field in record.get_fields('963'):\n rb = RelationshipBuilder()\n\n # Name/Type\n rel_name = \"Related uniform title\"\n rb.set_name(rel_name)\n rb.set_type(self.get_relation_type(rel_name))\n\n # Degree: n/a\n # Enumeration: n/a\n\n # Chronology:\n for val in field.get_subfields('d','f'):\n rb.set_time_or_duration_ref(DateTimeParser.parse_as_ref(val))\n field.delete_all_subfields('d')\n field.delete_all_subfields('f')\n\n # Notes: n/a\n\n # these often link to work insts instead of auts, but\n # should be PARSED most similarly to e.g. 
bib 730 (aut)\n rb.set_target(self.build_ref_from_field(field, WORK_AUT))\n\n relationships.append(rb.build())\n\n return relationships",
"def _analyze_relationships(self):\n self._child_map = defaultdict(set)\n self._parent_map = defaultdict(set)\n\n for table, table_meta in self._metadata['tables'].items():\n if table_meta.get('use', True):\n for field_meta in table_meta['fields'].values():\n ref = field_meta.get('ref')\n if ref:\n parent = ref['table']\n self._child_map[parent].add(table)\n self._parent_map[table].add(parent)",
"def relationships(self):",
"def get_relations(self):\n triples = list(self.get_triples())\n\n for s, p, o in triples:\n if not p.startswith(\"rel\"):\n s, o = int(s.id), int(o.id)\n yield {\"predicate\": p,\n \"subject\": s,\n \"subject_nodes\": list(self.get_descendants(s, triples)),\n \"object\": o,\n \"object_nodes\": list(self.get_descendants(o, triples)),\n }",
"def _get_and_create_relation_objects(self, root_node):\n relations = []\n\n for relation in root_node.iterdescendants(\"TLINK\"):\n lid = relation.get(\"lid\")\n\n # Get relation type as a string\n relation_type = relation.get(\"relType\")\n\n # Get relation_type_id\n relation_type_id = RelationType.get_id(relation_type)\n\n if not relation.get(\"timeID\") and not relation.get(\"relatedToTime\"):\n # This is event-event\n source_eiid = relation.get(\"eventInstanceID\")\n target_eiid = relation.get(\"relatedToEventInstance\")\n\n # Find source event\n source_obj = self.find_event_by_eiid(self.events, source_eiid)\n # Find target event\n target_obj = self.find_event_by_eiid(self.events, target_eiid)\n\n else:\n # This must be event-timex or timex-event or timex-timex\n target_tid = relation.get(\"relatedToTime\")\n target_eiid = relation.get(\"relatedToEventInstance\")\n\n source_tid = relation.get(\"timeID\")\n source_eiid = relation.get(\"eventInstanceID\")\n\n\n if source_tid and target_eiid:\n # timex-event\n source_obj = self.find_timex_by_tid(source_tid)\n target_obj = self.find_event_by_eiid(self.events, target_eiid)\n elif source_eiid and target_tid:\n # event-timex\n source_obj = self.find_event_by_eiid(self.events, source_eiid)\n target_obj = self.find_timex_by_tid(target_tid)\n elif source_tid and target_tid:\n # timex-timex\n source_obj = self.find_timex_by_tid(source_tid)\n target_obj = self.find_timex_by_tid(target_tid)\n\n relation_obj = Relation(lid, self.text_obj, source_obj, target_obj, relation_type_id)\n\n # So we don't run into problems with helper.output\n if relation_obj.is_timex_timex(): relation_obj.predicted_class = relation_type_id\n\n # There are sometimes duplicates which we do not want to have\n if relation_obj not in relations:\n relations.append(relation_obj)\n\n return relations",
"def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n '(SELECT subject_id '\n 'FROM cvterm_relationship '\n 'WHERE object_id = %s '\n 'UNION '\n 'SELECT cr.subject_id '\n 'FROM cvterm_relationship cr '\n 'INNER JOIN children ch ON ch.subject_id = cr.object_id) '\n 'SELECT * FROM children')\n ids = connect(child_ids,id,conn)\n list_of_ids = []\n for item in ids:\n list_of_ids.append(item[0])\n return(list_of_ids)",
"def _add_relationships(\n self,\n obj: BaseContent,\n relationships: List[graph.Relationship],\n nodes_to: List[graph.Node],\n ) -> None:\n for node_to, rel in zip(nodes_to, relationships):\n if not rel.start_node or not rel.end_node:\n raise ValueError(\"Relationships must have start and end nodes\")\n obj.add_relationship(\n RelationshipType(rel.type),\n RelationshipData(\n relationship_type=rel.type,\n source_id=rel.start_node.element_id,\n target_id=rel.end_node.element_id,\n content_item_to=self._id_to_obj[node_to.element_id],\n is_direct=True,\n **rel,\n ),\n )",
"def _find_relations(self, node, depth=0):\n depth += 1\n\n model = node.model\n opts = model._meta\n\n # determine relational fields to determine paths\n forward_fields = opts.fields\n reverse_fields = opts.get_all_related_objects()\n\n forward_o2o = filter(self._filter_one2one, forward_fields)\n reverse_o2o = filter(self._filter_related_one2one, reverse_fields)\n\n forward_fk = filter(self._filter_fk, forward_fields)\n reverse_fk = filter(self._filter_related_fk, reverse_fields)\n\n forward_m2m = filter(self._filter_m2m, opts.many_to_many)\n reverse_m2m = filter(self._filter_related_m2m,\n opts.get_all_related_many_to_many_objects())\n\n # iterate m2m relations\n for f in forward_m2m:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'manytomany',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related m2m fields\n for r in reverse_m2m:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'manytomany',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over one2one fields\n for f in forward_o2o:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'onetoone',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related one2one fields\n for r in reverse_o2o:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'onetoone',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over fk fields\n for f in forward_fk:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'foreignkey',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': f.null,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related foreign keys\n for r in reverse_fk:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'foreignkey',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n return node",
"def _add_all_level_relationships(\n self,\n session: Session,\n node_ids: Iterable[str],\n relationship_type: RelationshipType,\n marketplace: MarketplaceVersions = None,\n ):\n relationships: Dict[str, Neo4jRelationshipResult] = session.execute_read(\n get_all_level_packs_relationships,\n relationship_type,\n node_ids,\n marketplace,\n True,\n )\n nodes_to = []\n for content_item_relationship in relationships.values():\n nodes_to.extend(content_item_relationship.nodes_to)\n self._add_nodes_to_mapping(nodes_to)\n\n for content_item_id, content_item_relationship in relationships.items():\n obj = self._id_to_obj[content_item_id]\n for node in content_item_relationship.nodes_to:\n target = self._id_to_obj[node.element_id]\n source_id = content_item_id\n target_id = node.element_id\n if relationship_type == RelationshipType.IMPORTS:\n # the import relationship is from the integration to the content item\n source_id = node.element_id\n target_id = content_item_id\n obj.add_relationship(\n relationship_type,\n RelationshipData(\n relationship_type=relationship_type,\n source_id=source_id,\n target_id=target_id,\n content_item_to=target,\n mandatorily=True,\n is_direct=False,\n ),\n )",
"def _do_relation(self):\n if self.chunks:\n ch = self.chunks[-1]\n for relation, role in ch.relations:\n if role == \"SBJ\" or role == \"OBJ\":\n self.relations[role][relation] = ch\n if ch.type in (\"VP\",):\n self.relations[ch.type][ch.relation] = ch",
"def fixRelations (self):\n\t\tnodes = self.getFieldElements (\"relation\")\n\t\tif not nodes: return\n\t\t\n\t\tprint \"\\n%s\" % self.getId()\n\t\tfor r in nodes:\n\t\t\tvalue = XmlUtils.getText(r)\n\t\t\tif not value: return\n\t\t\tXmlUtils.setText (r,\"\")\n\t\t\tif value.startswith (\"http://\"):\n\t\t\t\tr.setAttribute (\"type\", \"Has part\")\n\t\t\t\tr.setAttribute (\"url\", value)\n\t\t\telse:\n\t\t\t\tr.setAttribute (\"type\", \"Is related\")\n\t\t\t\tr.setAttribute (\"title\", value)\n\t\t\tprint r.toxml()\n\t\tif 0:\n\t\t\tself.write()\n\t\t\tprint \"wrote record\"",
"def extract_emo_relations(self):\n for tweet_idx, tweet in enumerate(self.tweets):\n tweet_tokens = []\n idx2word, child2parent = {}, {}\n for word in tweet.rstrip().split('\\n'):\n if not word:\n sys.stderr.write(\"wat\")\n continue\n curr_word = Word(word.rstrip().split('\\t'), tweet_idx)\n idx2word[curr_word.idx] = curr_word\n child2parent[curr_word] = curr_word.parent\n\n # Isolate emotion words that are Verbs or Adjectives\n if curr_word.text in self.emo_kws and curr_word.pos in self.POS_LIST:\n self.tweet2emo[tweet_idx].append(curr_word)\n curr_word.is_emotion_word = True\n\n tweet_tokens.append(curr_word.text)\n\n # update tweet dictionary and add children to words\n self.add_relatives(child2parent, idx2word)\n tweet_text = \" \".join(tweet_tokens)\n self.idx2tweet[tweet_idx] = tweet_text\n\n # Create Tweet object\n self.add_tweet(tweet_idx, tweet_text, tweet_tokens, list(idx2word.values()))",
"def test_related_add_multiple_children(app, testdata):\n # Test language\n docs = testdata[\"documents\"]\n series = testdata[\"series\"]\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(docs[1][\"document_pid\"])\n ser3 = Series.get_record_by_pid(series[0][\"series_pid\"])\n\n assert len(doc1.related.editions) == 0\n assert len(doc2.related.editions) == 0\n assert len(ser3.related.editions) == 0\n\n doc1.related.add_edition(doc2)\n doc1.commit()\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(docs[1][\"document_pid\"])\n ser3 = Series.get_record_by_pid(series[0][\"series_pid\"])\n\n assert len(doc1.related.editions) == 1\n assert len(doc2.related.editions) == 1\n assert len(ser3.related.editions) == 0\n\n doc1.related.add_edition(ser3)\n doc1.commit()\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(docs[1][\"document_pid\"])\n ser3 = Series.get_record_by_pid(series[0][\"series_pid\"])\n\n assert len(doc1.related.editions) == 2\n assert len(doc2.related.editions) == 2\n assert len(ser3.related.editions) == 2",
"def _add_related(related, dep, all_related, index, connector=None):\n doc = {}\n doc[\"relationForm\"] = dep\n doc[\"rawName\"] = related\n doc[\"tokenIndex\"] = int(index)\n doc[\"offsetStart\"] = A.lookup[int(index)][\"start\"]\n doc[\"offsetEnd\"] = A.lookup[int(index)][\"end\"]\n doc[\"connector\"] = \"\" if connector is None else connector\n if not doc in all_related:\n all_related.append(doc)\n return all_related",
"def _subnode_ids(self):\n for ticket in self:\n ticket.subnode_ids = self.search([\n ('parent_id', '=', ticket.id),\n ('type.has_children', '=', True)])",
"def import_venture_relations(self, obj, ci):\n _replace_relations(\n obj, ci, 'child', 'data_center',\n self.datacenter_content_type, cdb.CI_RELATION_TYPES.REQUIRES,\n )\n _replace_relations(\n obj, ci, 'child', 'parent',\n self.venture_content_type, cdb.CI_RELATION_TYPES.CONTAINS,\n )",
"def get_children(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_children()\n return set_parents",
"def parse_taxonomy( seq_id, lineage, key_dictionary ):\n\tif seq_id in sti_dict:\n\t\ttax_id = sti_dict[ seq_id ]\n\t\ttax_names = [ tax_id ] #list of taxon names\n\telse:\n\t\ttax_id = str( seq_id )\n\t\ttax_names = [ tax_id ] #list of taxon names\n\ttax_numbers = [ seq_id ]\n\tis_A_list = [] #store is_A relationships\n\n\twhile lineage != '1': #forces traversal through the tri file until we get to the root of taxonomy\n\t\t#print lineage\n\t\tif lineage == '0': #need this to process the root in the tri file. \n\t\t\tbreak\n\t\tis_A_list = [lineage] + is_A_list\n\t\ttax_numbers = [lineage] + tax_numbers\n\t\tif lineage in sti_dict: #we have the next taxonomic representative in the sti file\n\t\t\ttax_id = sti_dict[ lineage ]\n\t\t\ttax_names = [tax_id] + tax_names #append tax_id to front of list\n\t\telse: #the taxon does not have a sequence representative. \n\t\t\ttax_id = str( lineage ) \n\t\t\ttax_names = [tax_id] + tax_names\n\t\t#now process to next lineage\n\t\tlineage = tri_dict[ lineage ] \n\n\n\ttax_names = ['root'] + tax_names #append tax_id to front of list\n\ttax_numbers = [lineage] + tax_numbers\n\tis_A_list = ['0'] + [lineage] + is_A_list\n\n\t#now append all of these reuslts to the final dictionary, which will be keyed \n\t#off of the tax_numbers list (unique IDs for each taxonomic level.\n\n\tfor i in xrange( len( tax_numbers ) ):\n\t\tid = tax_numbers[i]\n\t\tif id in key_dictionary:\n\t\t\tpass\n\t\telse:\n\t\t\tparent = is_A_list[i]\n\t\t\tlevel = i #taxonomic level (how far down in levels are we?)\n\t\t\tnames = process_names( tax_names[:i+1] )\n\t\t\tkey_dictionary[ id ] = [ parent, level, names ]\n\n\treturn( key_dictionary )",
"def add_relatives(self, child2parent, idx2word):\n for child, parent in child2parent.items():\n if parent not in (0, -1):\n parent_word = idx2word[parent]\n parent_word.add_child(child)\n child.parent = parent_word",
"def _set_level_depth(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n\n def _init_level(rec):\n if rec.level is None:\n if rec.parents:\n rec.level = min(_init_level(rec) for rec in rec.parents) + 1\n else:\n rec.level = 0\n return rec.level\n\n def _init_depth(rec):\n if rec.depth is None:\n if rec.parents:\n rec.depth = max(_init_depth(rec) for rec in rec.parents) + 1\n else:\n rec.depth = 0\n return rec.depth\n\n def _init_reldepth(rec):\n if not hasattr(rec, 'reldepth'):\n up_terms = rec.get_goterms_upper()\n if up_terms:\n rec.reldepth = max(_init_reldepth(rec) for rec in up_terms) + 1\n else:\n rec.reldepth = 0\n return rec.reldepth\n\n for rec in self.values():\n\n # Add invert relationships\n if has_relationship:\n if rec.depth is None:\n _init_reldepth(rec)\n\n # print(\"BBBBBBBBBBB1\", rec.id, rec.relationship)\n #for (typedef, terms) in rec.relationship.items():\n # invert_typedef = self.typedefs[typedef].inverse_of\n # # print(\"BBBBBBBBBBB2 {} ({}) ({}) ({})\".format(\n # # rec.id, rec.relationship, typedef, invert_typedef))\n # if invert_typedef:\n # # Add inverted relationship\n # for term in terms:\n # if not hasattr(term, 'relationship'):\n # term.relationship = defaultdict(set)\n # term.relationship[invert_typedef].add(rec)\n # print(\"BBBBBBBBBBB3\", rec.id, rec.relationship)\n\n if rec.level is None:\n _init_level(rec)\n\n if rec.depth is None:\n _init_depth(rec)",
"def init_db(self, parent_type, child_type):\n self.parent = Node(self.handler, parent_type)\n self.children = [ Node(self.handler, child_type) for x in range(0, self.SIZE) ]\n for node in self.children:\n Link(self.handler, self.parent.node, node.node, child_type.upper())",
"def Children(self) -> _n_1_t_2:",
"def gen_model(children, tree_structure):\n referenced = False\n extended = False\n for child in children:\n #print child.arg\n node = dict()\n extended = False\n if hasattr(child, 'substmts'):\n for attribute in child.substmts:\n # process the 'type' attribute:\n # Currently integer, enumeration and string are supported.\n if attribute.keyword == 'type':\n if len(attribute.arg.split(':'))>1:\n attribute.arg = attribute.arg.split(':')[-1]\n # Firstly, it is checked if the attribute type has been previously define in typedefs.\n if attribute.arg in TYPEDEFS:\n if TYPEDEFS[attribute.arg]['type'][:3] == 'int':\n node['type'] = 'integer'\n node['format'] = TYPEDEFS[attribute.arg]['format']\n elif TYPEDEFS[attribute.arg]['type'] == 'enumeration':\n node['type'] = 'string'\n node['enum'] = [e\n for e in TYPEDEFS[attribute.arg]['enum']]\n # map all other types to string\n else:\n node['type'] = 'string'\n elif attribute.arg[:3] == 'int':\n node['type'] = 'integer'\n node['format'] = attribute.arg\n elif attribute.arg == 'decimal64':\n node['type'] = 'number'\n node['format'] = 'double'\n elif attribute.arg == 'boolean':\n node['type'] = attribute.arg\n elif attribute.arg == 'enumeration':\n node['type'] = 'string'\n node['enum'] = [e[0]\n for e in attribute.i_type_spec.enums]\n # map all other types to string\n else:\n node['type'] = 'string'\n elif attribute.keyword == 'mandatory':\n parent_model = to_upper_camelcase(child.parent.arg)\n if parent_model not in PARENT_MODELS.keys():\n PARENT_MODELS[parent_model] = {'models':[],'discriminator':to_lower_camelcase(child.arg)}\n # Process the reference to another model.\n # We differentiate between single and array references.\n elif attribute.keyword == 'uses':\n if len(attribute.arg.split(':'))>1:\n attribute.arg = attribute.arg.split(':')[-1]\n\n ref = to_upper_camelcase(attribute.arg)\n ref = '#/definitions/' + ref\n if str(child.keyword) == 'list':\n node['items'] = {'$ref': ref}\n node['type'] = 'array'\n for attribute in child.substmts:\n if attribute.keyword == 'key':\n listkey = to_lower_camelcase(attribute.arg)\n if listkey:\n node['x-key'] = listkey\n referenced = True\n elif str(child.keyword) == 'grouping':\n ref = to_upper_camelcase(attribute.arg)\n if ref in tree_structure:\n PARENT_MODELS[ref]['models'].append(child.arg)\n list_properties = [item for item in tree_structure[ref]['properties']]\n ref = '#/definitions/' + ref\n node['allOf'] = []\n node['allOf'].append({'$ref': ref})\n index = 0\n for i in range(0, len(child.i_children)):\n #print len(child.i_children)\n if to_lower_camelcase(child.i_children[index].arg) in list_properties:\n del child.i_children[index]\n else:\n index+=1\n extended = True\n else:\n pending_models.append(child)\n else:\n node['$ref'] = ref\n referenced = True\n\n # When a node contains a referenced model as an attribute the algorithm\n # does not go deeper into the sub-tree of the referenced model.\n if not referenced :\n if not extended:\n node = gen_model_node(child, node)\n else:\n node_ext = dict()\n node_ext = gen_model_node(child, node_ext)\n node['allOf'].append( node_ext)\n extended = False\n\n # Leaf-lists need to create arrays.\n # Copy the 'node' content to 'items' and change the reference\n if child.keyword == 'leaf-list':\n ll_node = {'type': 'array', 'items': node}\n node = ll_node\n # Groupings are class names and upper camelcase.\n # All the others are variables and lower camelcase.\n if child.keyword == 'grouping':\n tree_structure[to_upper_camelcase(child.arg)] = node\n else:\n 
tree_structure[to_lower_camelcase(child.arg)] = node\n # TODO: do we really need this return value? We are working on the\n # reference anyhow.\n return tree_structure",
"def relations_to(self, end_node):",
"def import_device_relations(self, obj, ci):\n _replace_relations(\n obj, ci, 'child', 'venture',\n self.venture_content_type, cdb.CI_RELATION_TYPES.CONTAINS,\n )\n _replace_relations(\n obj, ci, 'child', 'venture_role',\n self.venture_role_content_type, cdb.CI_RELATION_TYPES.HASROLE\n )\n _replace_relations(\n obj, ci, 'child', 'parent',\n self.device_content_type, cdb.CI_RELATION_TYPES.CONTAINS\n )",
"def test_related_add_editions_to_child(app, testdata):\n doc1 = Document.get_record_by_pid(testdata[\"documents\"][0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(testdata[\"documents\"][1][\"document_pid\"])\n ser3 = Series.get_record_by_pid(testdata[\"series\"][0][\"series_pid\"])\n\n doc1.related.add_edition(doc2)\n doc2.related.add_edition(ser3)\n\n parent_editions = doc1.related.editions\n child1_editions = doc2.related.editions\n child2_editions = ser3.related.editions\n\n assert len(parent_editions) == 2\n assert len(child1_editions) == 2\n assert len(child2_editions) == 2\n\n assert parent_editions[0] == doc2\n assert parent_editions[1] == ser3\n assert child1_editions[0] == doc1\n assert child1_editions[1] == ser3\n assert child2_editions[0] == doc2\n assert child2_editions[1] == doc1",
"def _go_terms_to_list(self, go_cat=None, set_go_terms=None, relation=\"parent\"):\n def rep(term, cat_str):\n return term.replace(\"level-\", \"\").replace(\"depth-\", \"\").replace(\" [{}]\".format(cat_str), \"\")\n if relation not in [\"parent\", \"child\"]:\n raise ValueError(\"'relation' must be parent or child\")\n list_relations = [[rep(x, self.dict_go_ns[go_cat]) for x in str(self.dict_go[term]).split(\"\\t\")] +\n [relation] for term in set_go_terms]\n return list_relations",
"def populate(data, model: models.Model) -> list:\n\n children = []\n\n for record in data:\n # Check for children\n if 'children' in record:\n children_data = record.pop('children', [])\n obj = model.objects.create(**record)\n obj_children = populate(children_data, model)\n for child in obj_children:\n child.parent = obj\n child.save()\n # If not children, just create\n else:\n obj = model.objects.create(**record)\n # Adding record\n children.append(obj)\n\n return children",
"def resolve_relations(self):\n\n log.debug(\"Start resolving relations\")\n for object_type in NetBoxObject.__subclasses__():\n\n for this_object in self.get_all_items(object_type):\n\n this_object.resolve_relations()\n\n log.debug(\"Finished resolving relations\")"
]
| [
"0.7713539",
"0.5963961",
"0.5717024",
"0.5698482",
"0.546695",
"0.5437619",
"0.5430837",
"0.54162246",
"0.53634787",
"0.5328484",
"0.53268766",
"0.52261734",
"0.52211857",
"0.5174679",
"0.5172715",
"0.5164123",
"0.513734",
"0.5097313",
"0.50481874",
"0.504175",
"0.50369143",
"0.50349957",
"0.5031127",
"0.5027027",
"0.5019966",
"0.49585733",
"0.4948376",
"0.49406055",
"0.49266946",
"0.49207544"
]
| 0.7230565 | 1 |
Set level, depth and add inverted relationships. | def _set_level_depth(self, optobj):
has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs
def _init_level(rec):
if rec.level is None:
if rec.parents:
rec.level = min(_init_level(rec) for rec in rec.parents) + 1
else:
rec.level = 0
return rec.level
def _init_depth(rec):
if rec.depth is None:
if rec.parents:
rec.depth = max(_init_depth(rec) for rec in rec.parents) + 1
else:
rec.depth = 0
return rec.depth
def _init_reldepth(rec):
if not hasattr(rec, 'reldepth'):
up_terms = rec.get_goterms_upper()
if up_terms:
rec.reldepth = max(_init_reldepth(rec) for rec in up_terms) + 1
else:
rec.reldepth = 0
return rec.reldepth
for rec in self.values():
            # Add inverted relationships
if has_relationship:
if rec.depth is None:
_init_reldepth(rec)
# print("BBBBBBBBBBB1", rec.id, rec.relationship)
#for (typedef, terms) in rec.relationship.items():
# invert_typedef = self.typedefs[typedef].inverse_of
# # print("BBBBBBBBBBB2 {} ({}) ({}) ({})".format(
# # rec.id, rec.relationship, typedef, invert_typedef))
# if invert_typedef:
# # Add inverted relationship
# for term in terms:
# if not hasattr(term, 'relationship'):
# term.relationship = defaultdict(set)
# term.relationship[invert_typedef].add(rec)
# print("BBBBBBBBBBB3", rec.id, rec.relationship)
if rec.level is None:
_init_level(rec)
if rec.depth is None:
_init_depth(rec) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_level(self):\n queue = []\n for node in self.node:\n if distance.euclidean(node.location, para.base) < node.com_ran:\n node.level = 1\n queue.append(node.id)\n while queue:\n for neighbor_id in self.node[queue[0]].neighbor:\n if not self.node[neighbor_id].level:\n self.node[neighbor_id].level = self.node[queue[0]].level + 1\n queue.append(neighbor_id)\n queue.pop(0)",
"def _add_graph_level(graph, level, parent_ids, names, scores):\n for i, parent_id in enumerate(parent_ids):\n new_node = (level, i)\n parent_node = (level - 1, parent_id)\n graph.add_node(new_node)\n graph.node[new_node][\"name\"] = names[i]\n graph.node[new_node][\"score\"] = str(scores[i])\n graph.node[new_node][\"size\"] = 100\n # Add an edge to the parent\n graph.add_edge(parent_node, new_node)",
"def set_depth(self, d):\r\n self.depth = d",
"def set_depth(self):\n if self.depth != None:\n return\n if not self.parents:\n self.depth = 0\n return\n for parent in self.parents:\n parent.set_depth()\n \n depths = [parent.depth for parent in self.parents]\n self.depth = max(depths) + 1",
"def traverse(self,node):\n self.ancestors[self.descendants[node]] = node\n for child in self.children[node]:\n self.traverse(child)\n self.descendants.union(child,node)\n self.ancestors[self.descendants[node]] = node\n self.visited.add(node)\n for query in self[node]:\n if query in self.visited:\n lca = self.ancestors[self.descendants[query]]\n self[node][query] = self[query][node] = lca",
"def __assign_level(vertex: \"Vertex\", level, already_assigned: \"List[Vertex]\"):\n vertex.level = level\n already_assigned.append(vertex)\n for neighbour in vertex.neighbours:\n if neighbour not in already_assigned:\n __assign_level(neighbour, level + 1, already_assigned)",
"def increment_depth(self):\r\n self.depth = self.depth + 1",
"def update_level(self):\n level = 1\n assigned_levels = set([])\n just_assigned = set([])\n for root in self.roots:\n for child in root.children:\n if child in just_assigned:\n continue\n child.level = level\n if len(child.children) == 0:\n continue\n just_assigned.add(child)\n assigned_levels = assigned_levels.union(just_assigned)\n\n level += 1\n leaves = [c for c in self.collectors if len(c.children) == 0]\n len_non_leaves = len(self.collectors) - len(leaves)\n self.update_level_for_non_leaves(\n level, assigned_levels, just_assigned, len_non_leaves\n )",
"def _find_relations(self, node, depth=0):\n depth += 1\n\n model = node.model\n opts = model._meta\n\n # determine relational fields to determine paths\n forward_fields = opts.fields\n reverse_fields = opts.get_all_related_objects()\n\n forward_o2o = filter(self._filter_one2one, forward_fields)\n reverse_o2o = filter(self._filter_related_one2one, reverse_fields)\n\n forward_fk = filter(self._filter_fk, forward_fields)\n reverse_fk = filter(self._filter_related_fk, reverse_fields)\n\n forward_m2m = filter(self._filter_m2m, opts.many_to_many)\n reverse_m2m = filter(self._filter_related_m2m,\n opts.get_all_related_many_to_many_objects())\n\n # iterate m2m relations\n for f in forward_m2m:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'manytomany',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related m2m fields\n for r in reverse_m2m:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'manytomany',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over one2one fields\n for f in forward_o2o:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'onetoone',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related one2one fields\n for r in reverse_o2o:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'onetoone',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over fk fields\n for f in forward_fk:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'foreignkey',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': f.null,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related foreign keys\n for r in reverse_fk:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'foreignkey',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n return node",
"def update(self):\n\n for node in self.nodes:\n for edge in node.edges:\n for i, edge_node in enumerate(edge.nodes):\n if edge_node.id != node.id:\n edge_node.add_edge(edge)\n\n return self",
"def setPresenceInLevels(self,external_reference_dic='No'): \n \n \n if external_reference_dic == 'No':\n # this will initialize the directory using the parent as level as reference\n logger.info('Using parent taxonomy as reference source for representing presences and absences in subsequent levels.')\n self.parent.setPresenceAbsenceFeature()\n dic = self.parent.presences.toDict()\n else:\n dic = external_reference_dic\n try: \n for level in self:\n level.setPresenceAbundanceData(dic)\n except:\n logger.error(\"The reference dictionary is not compatible.\")\n raise Exception(\"The reference dictionary is not compatible.\")",
"def set_right_edges(self):\n for v in self:\n for e in v.edges_list:\n e.linked[0]=v\n e.linked[1]=self[self.search_index_by_coordinates(e.linked[1].coordinates)]\n for e in self.list_of_edges:\n e.linked[0]=self[self.search_index_by_coordinates(e.linked[0].coordinates)]\n e.linked[1]=self[self.search_index_by_coordinates(e.linked[1].coordinates)]",
"def _populate_relationships(self, rec_curr):\n for relationship_type, goids in rec_curr.relationship.items():\n parent_recs = set([self[goid] for goid in goids]) \n rec_curr.relationship[relationship_type] = parent_recs # replace GO ID with GO Term record object\n for parent_rec in parent_recs:\n if relationship_type not in parent_rec.relationship_rev:\n parent_rec.relationship_rev[relationship_type] = set([rec_curr])\n else:\n parent_rec.relationship_rev[relationship_type].add(rec_curr)",
"def set_depth(node, depth):\n setattr(node[0], \"depth\", depth)",
"def decrement_depth(self):\r\n self.depth = self.depth - 1",
"def invert(tree: nx.DiGraph) -> nx.DiGraph:\n new_tree = tree.copy()\n for node in new_tree.nodes:\n swap = np.empty(new_tree.nodes[node]['colinear_segments'].shape)\n swap[:, 1, :] = new_tree.nodes[node]['colinear_segments'][:, 0, :]\n swap[:, 0, :] = new_tree.nodes[node]['colinear_segments'][:, 1, :]\n new_tree.nodes[node]['colinear_segments'] = swap\n\n line_swap = np.empty((2, 2))\n old_line = new_tree.nodes[node]['line']\n line_swap[0] = old_line[1]\n line_swap[1] = old_line[0]\n new_tree.nodes[node]['line'] = line_swap\n\n for edge in new_tree.edges:\n new_tree.edges[edge]['position'] *= -1\n\n return new_tree",
"def add_children(self,node):\n\n node.parent_id = self.id\n node.level = self.level + 1\n node.path = node._create_path()\n node.save()",
"def add_relatives(self, child2parent, idx2word):\n for child, parent in child2parent.items():\n if parent not in (0, -1):\n parent_word = idx2word[parent]\n parent_word.add_child(child)\n child.parent = parent_word",
"def evert(self):\n for e in self.edges:\n self.invert()\n for f in self.faces:\n f.invert()",
"def create_all_menus(self, ):\n m = self._model\n if not m:\n return\n indizes = self._flatten_hierarchy(m)\n for i in indizes:\n self.create_menu_for_index(i)",
"def createLevelMap(self):\n for a in self.hierarchy.iterkeys():\n self.lvl = 0\n self.calcLevel(a)\n if self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n self.levelMap.addLevelData(AgentName=a, Level=self.lvl)",
"def addLevel(self):\n pass",
"def createLevelMap(self):\n\t\tfor a in self.hierarchy.iterkeys():\n\t\t\tself.lvl = 0\n\t\t\tself.calcLevel(a)\n\t\t\tif self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n\t\t\tself.levelMap.addLevelData(AgentName=a, Level=self.lvl)",
"def reverse_graph(self):\n rgraph = DGraph()\n rgraph.graph = deepcopy(self.graph)\n\n for node in rgraph.graph:\n node.data.children, node.data.parents = node.data.parents, node.data.children\n\n return rgraph",
"def update_node2edge(self):\n self.node2edge = {e.child : e for e in self.edge}\n childrenset = set(self.node2edge.keys())\n rootset = set(e.parent for e in self.edge).difference(childrenset)\n if len(rootset) > 1:\n raise Warning(\"there should be a single root: \" + str(rootset))\n if len(rootset) == 0:\n raise Exception(\"there should be at least one root!\")\n self.root = rootset.pop()",
"def create_hierarchy(self):\n\t\tpass",
"def _do_relation(self):\n if self.chunks:\n ch = self.chunks[-1]\n for relation, role in ch.relations:\n if role == \"SBJ\" or role == \"OBJ\":\n self.relations[role][relation] = ch\n if ch.type in (\"VP\",):\n self.relations[ch.type][ch.relation] = ch",
"def svn_info_t_depth_set(svn_info_t_self, svn_depth_t_depth): # real signature unknown; restored from __doc__\n pass",
"def relationships(self):",
"def _add_node(self, parent, model, relation, reverse, related_name,\n accessor_name, nullable, depth):\n # Reverse relationships\n if reverse and '+' in related_name:\n return\n\n node_hash = self._nodes.get(model, None)\n\n # don't add node if a path with a shorter depth exists. this is applied\n # after the correct join has been determined. generally if a route is\n # defined for relation, this will never be an issue since there would\n # only be one path available. if a route is not defined, the shorter\n # path will be found\n if not node_hash or node_hash['depth'] > depth:\n if node_hash:\n node_hash['parent'].remove_child(model)\n\n node = ModelTreeNode(model, parent, relation, reverse,\n related_name, accessor_name, nullable, depth)\n\n self._nodes[model] = {\n 'parent': parent,\n 'depth': depth,\n 'node': node,\n }\n\n node = self._find_relations(node, depth)\n parent.children.append(node)"
]
| [
"0.5710649",
"0.5664993",
"0.5536199",
"0.5480017",
"0.5424314",
"0.52821314",
"0.52650774",
"0.5216432",
"0.51938105",
"0.5161485",
"0.51139736",
"0.5094936",
"0.5065531",
"0.505468",
"0.50471",
"0.49980313",
"0.49617708",
"0.49417603",
"0.49165037",
"0.49140632",
"0.49120986",
"0.49092573",
"0.4881217",
"0.48775825",
"0.48696566",
"0.48428783",
"0.48320606",
"0.4815068",
"0.4810304",
"0.4803616"
]
| 0.77841955 | 0 |
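Illustrative only (not part of the dataset row above): a minimal, self-contained sketch of why "level" and "depth" can differ — level is the shortest distance to a root, depth the longest — using a hypothetical `Term` class in place of the GO term records assumed by `_set_level_depth`.

```python
# Hypothetical minimal term class; a stand-in for the GO term records above.
class Term:
    def __init__(self, name, parents=()):
        self.name = name
        self.parents = list(parents)
        self.level = None   # min distance to any root
        self.depth = None   # max distance to any root

def init_level(rec):
    if rec.level is None:
        rec.level = min((init_level(p) for p in rec.parents), default=-1) + 1
    return rec.level

def init_depth(rec):
    if rec.depth is None:
        rec.depth = max((init_depth(p) for p in rec.parents), default=-1) + 1
    return rec.depth

root = Term("root")
a = Term("a", [root])      # one hop below the root
b = Term("b", [root, a])   # reachable in 1 hop (via root) or 2 hops (via a)
print(init_level(b), init_depth(b))  # 1 2
```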
Given a GO ID, return the int value. | def id2int(go_id):
return int(go_id.replace("GO:", "", 1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getInt(self, addr: ghidra.program.model.address.Address) -> int:\n ...",
"def getID(self) -> int:\n ...",
"def get_int(self, item: str) -> int:\n return int(self[item])",
"def id(self):\n if (len(self.value) > 1) and isinstance(self.value[1], int):\n return self.value[1]\n return -1",
"def _parse(self, the_id: typing.Union[int, str]) -> int:\n return int(the_id)",
"def to_number(self, id):\r\n if isinstance(id, int):\r\n return id\r\n else:\r\n return self.name_to_number(id)",
"def getDbInt(self, db, key):\n return int(self.getDbStr(db, key))",
"def getVehId(orgId):\n global vehId, vehIdDict\n value = vehIdDict.get(orgId, vehId)\n if value is vehId:\n vehIdDict[orgId] = vehId\n vehId = (vehId + 1) % 65500\n return value",
"def\tget_id(args):\n\tpath = args.config['book-path']\n\tdata = json.loads(open(path).read())\n\tif len(data) == 0:\n\t\treturn 0\n\torder_id = data[len(data) - 1]['id']\n\torder_id += 1\n\treturn order_id",
"def getint(self, option, argument=None):\n value = self.get(option, argument)\n if value: return int(value)\n else: return 0",
"def getInt(self, address: ghidra.program.model.address.Address) -> int:\n ...",
"def get_int(self, name):\n return self.field(name).toInt()[0]",
"def _get_id(self, id_family: str) -> int:\n res = self._id_manager[id_family]\n self._id_manager[id_family] += 1\n return res",
"def GetId(self):\n return int(self.id)",
"def id(self) -> int:\n return pulumi.get(self, \"id\")",
"def integer_id(self):\n id = self.id()\n if not isinstance(id, (int, long)):\n id = None\n return id",
"def myID() -> np.int:\r\n return 304976335",
"def myID() -> np.int:\r\n return 304976335",
"def getInteger(self):\n pass",
"def get_int(self, sect, opt):\r\n vstr = self.get_safe(sect, opt)\r\n try:\r\n return int(vstr)\r\n except ValueError:\r\n return 0",
"def HuntIDToInt(hunt_id):\n # TODO(user): This code is only needed for a brief period of time when we\n # allow running new rel-db flows with old aff4-based hunts. In this scenario\n # parent_hunt_id is effectively not used, but it has to be an\n # integer. Stripping \"H:\" from hunt ids then makes the rel-db happy. Remove\n # this code when hunts are rel-db only.\n if hunt_id.startswith(\"H:\"):\n hunt_id = hunt_id[2:]\n\n try:\n return int(hunt_id or \"0\", 16)\n except ValueError as e:\n raise HuntIDIsNotAnIntegerError(e)",
"def getInteger(self):\n return _libsbml.ASTNode_getInteger(self)",
"def getInteger(self):\n assert self._is_int is True\n return self._value",
"def get_organization_id(thing: object) -> t.OrganizationId:\n if isinstance(thing, int):\n return t.OrganizationId(thing)\n try:\n int_id = int(thing) # type: ignore\n return t.OrganizationId(int_id)\n except ValueError:\n raise err.InvalidOrganizationError(id=str(thing))",
"def getValue(self, o: ghidra.util.graph.KeyedObject) -> int:\n ...",
"def GetInteger(self,prompt=''):\n\t\treturn self.acad.ActiveDocument.Utility.GetInteger(prompt)",
"def getint(self, section, option):\n return int(self.get(section, option))",
"def __int__(self):\n return self.get_raw_int()",
"def _num(self):\n try:\n num = int(self.__rId[3:])\n except ValueError:\n num = 9999\n return num",
"def get_id(self) -> int:\n return self.id"
]
| [
"0.62385684",
"0.621948",
"0.62051165",
"0.6159339",
"0.615776",
"0.6103479",
"0.6071196",
"0.5994222",
"0.59450656",
"0.5940004",
"0.59305966",
"0.59223413",
"0.58303654",
"0.58217436",
"0.5805286",
"0.57666236",
"0.5742528",
"0.5742528",
"0.57226086",
"0.5709645",
"0.56955683",
"0.5686844",
"0.5681014",
"0.5673104",
"0.56129515",
"0.5593861",
"0.5575359",
"0.557039",
"0.5549656",
"0.554513"
]
| 0.8050284 | 0 |
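Illustrative only: a quick usage check for the `id2int` helper shown in the row above.

```python
def id2int(go_id):
    """Given a GO ID such as 'GO:0003723', return the int value."""
    return int(go_id.replace("GO:", "", 1))

assert id2int("GO:0003723") == 3723
assert id2int("GO:0000001") == 1
```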
Returns all possible paths to the root node. Each path includes the term given. The order of the path is top > bottom, i.e. it starts with the root and ends with the given term (inclusively). | def paths_to_top(self, term):
# error handling consistent with original authors
if term not in self:
sys.stderr.write("Term %s not found!\n" % term)
return
def _paths_to_top_recursive(rec):
if rec.level == 0:
return [[rec]]
paths = []
for parent in rec.parents:
top_paths = _paths_to_top_recursive(parent)
for top_path in top_paths:
top_path.append(rec)
paths.append(top_path)
return paths
go_term = self[term]
return _paths_to_top_recursive(go_term) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_paths(t, entry):\n paths = []\n if t.label == entry:\n return [[entry]]\n for b in t.branches:\n for p in find_paths(b, entry):\n paths.append([t.label] + p)\n return paths",
"def find_all_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n paths = []\n for node in self.graph[start]:\n if node not in path:\n newpaths = self.find_path(node, end, path)\n paths.append(newpaths)\n return paths",
"def find_all_paths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n paths = []\n for node in graph[start]:\n newpaths = find_all_paths(graph, node, end, path)\n paths += newpaths\n return paths",
"def FindAllPaths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n if start not in graph:\n return None\n paths = []\n for node in graph[start]:\n if node not in path:\n newpaths = find_all_paths(graph, node, end, path)\n for newpath in newpaths:\n paths.append(newpath)\n return paths",
"def get_roots(self):\n roots = []\n for symbol in self.GlobalSymbolDict.values():\n if symbol.isRoot():\n roots += [symbol]\n return roots",
"def test_path_to_root(self):\n def go(id):\n return get_obj(self.session, self.model, id)\n\n node11 = go(11)\n node8 = go(8)\n node6 = go(6)\n node1 = go(1)\n path_11_to_root = node11.path_to_root(self.session).all()\n path_8_to_root = node8.path_to_root(self.session).all()\n path_6_to_root = node6.path_to_root(self.session).all()\n path_1_to_root = node1.path_to_root(self.session).all()\n self.assertEqual(path_11_to_root, [go(11), go(10), go(7), go(1)])\n self.assertEqual(path_8_to_root, [go(8), go(7), go(1)])\n self.assertEqual(path_6_to_root, [go(6), go(4), go(1)])\n self.assertEqual(path_1_to_root, [go(1)])\n\n asc_path_11_to_root = node11.path_to_root(self.session, order=asc).all()\n self.assertEqual(asc_path_11_to_root, [go(1), go(7), go(10), go(11)])",
"def roots(self) -> List[str]:\n return [node for node, degree in self.graph.in_degree() if degree == 0]",
"def hypernym_paths(self):\n paths = []\n hypernyms = self._direct_hypernyms\n if self.is_root():\n paths = [[self]]\n for hypernym in hypernyms:\n for ancestor_list in hypernym.hypernym_paths():\n ancestor_list.append(self)\n paths.append(ancestor_list)\n return paths",
"def _get_leaf_node_paths(t\n ):\n return {\n k: _get_leaf_node_path(k, t)\n for k, v in t.get_descendants().items()\n if isinstance(v.node, prensor.LeafNodeTensor)\n }",
"def path_from_root(self):\n path = []\n parent = self.parent\n child = self\n while parent is not None:\n if child is parent.left_subtree:\n path.append('left')\n else:\n path.append('right')\n child = parent\n parent = parent.parent\n \n path.reverse()\n return path",
"def path(self):\n node, return_path = self, []\n while node:\n # Add the nodes in reverse order to a list until you reach the\n # root parent node which will terminate the loop\n return_path.append(node)\n node = node.parent\n # Reverse the list to get the proper path back\n return list(reversed(return_path))",
"def path(self, root, n=10, stepsize=3):\n seq = []\n seq.append(root)\n while len(seq) < n:\n next = self.synonyms([seq[-1]], stepsize)\n random.shuffle(next)\n maxToAdd = stepsize\n added_something = False\n for j in next:\n if j not in seq:\n seq.append(j)\n added_something = True\n maxToAdd -= 1\n if maxToAdd <= 0:\n break\n if added_something is False:\n seq.append(root)\n return(seq[0:n])",
"def roots_of(self, kw):\n return [kkw for kkw in self.ancestry_of(kw) if not self.getNode(kkw).edgein]",
"def path_to_root(self):\n path = [self]\n if self.is_root():\n return path\n root = False\n current = self.parent\n\n while not root:\n path.append(current)\n if current.is_root():\n root = True\n else:\n current = current.parent\n\n return path",
"def get_paths_from(self, symbol):\n to_return = []\n visitation_queue = [self.head]\n while len(visitation_queue) != 0:\n visiting = visitation_queue.pop(0)\n for elem in visiting.children:\n visitation_queue.append(elem)\n if symbol in visiting.inputs:\n v = visiting\n model_trail = []\n while v.parent is not None:\n model_trail.append(v.m)\n v = v.parent\n to_return.append(SymbolPath(visiting.inputs, model_trail))\n return to_return",
"def paths(self, return_indices=False):\n paths = []\n for tree in self.components():\n paths += self._single_tree_paths(tree, return_indices=return_indices)\n return paths",
"def roots (self, reset=False):\n if reset or (self.__roots is None):\n self.__roots = set()\n for n in self.__nodes:\n if not (n in self.__reverseMap):\n self.__roots.add(n)\n return self.__roots",
"def get_roots(self, connection=None):\n\n connection = connection or self.engine.connect()\n\n return connection.execute(\n select(\n [self.nodes.c.title, self.nodes.c.id.label('descendant')]\n ).where(\n self.nodes.c.id.notin_(\n select([self.paths.c.descendant]).where(self.paths.c.depth > 0)\n )\n )\n )",
"def get_all_paths(coll, prefix_path=(), stop_at=None, stop_below=None):\n assert stop_at is None or stop_below is None, 'Only one of stop_at or stop_below can be used.'\n if stop_below is not None and stop_below in str(last(butlast(prefix_path))):\n return [[]]\n if stop_at is not None and stop_at in str(last(prefix_path)):\n return [[]]\n if isinstance(coll, dict) or isinstance(coll, Munch) or isinstance(coll, list):\n if isinstance(coll, dict) or isinstance(coll, Munch):\n items = iteritems(coll)\n else:\n items = enumerate(coll)\n\n return list(cat(map(lambda t: list(map(lambda p: [t[0]] + p,\n get_all_paths(t[1],\n prefix_path=list(prefix_path) + [t[0]],\n stop_at=stop_at,\n stop_below=stop_below)\n )),\n items))\n )\n else:\n return [[]]",
"def path_to_root(this_body, direct_orbits):\r\n path = []\r\n # if this_body is not in the list of keys for direct_orbits then it orbits nothing and we're at the root\r\n while this_body in direct_orbits:\r\n path.append(this_body)\r\n this_body = direct_orbits[this_body]\r\n # append the root\r\n path.append(this_body)\r\n return path",
"def calculate_paths(self):\n self.paths = {}\n for node in self.nodes:\n path = self.find_path_to_root(node)\n self.paths[node] = path\n self.path_dists[node] = [0.0] + [n.branch for n in path[1:]]",
"def get_all_leaf_paths(coll):\n if isinstance(coll, dict) or isinstance(coll, Munch):\n return list(cat(map(lambda t: list(map(lambda p: [t[0]] + p,\n get_all_leaf_paths(t[1])\n )),\n iteritems(coll)))\n )\n\n elif isinstance(coll, list):\n return list(cat(map(lambda t: list(map(lambda p: [t[0]] + p,\n get_all_leaf_paths(t[1])\n )),\n enumerate(coll)))\n )\n else:\n return [[]]",
"def find_all(self, prefix):\r\n\r\n def _find_all(trienode, mem, valid_words=[]):\r\n \"\"\"Return a list of valid words starting from trienode. mem is a \r\n string that is used to remember the word up until root.\"\"\"\r\n \r\n if trienode.data(): \r\n valid_words.append(mem)\r\n if trienode.children():\r\n for children in trienode.children():\r\n _find_all(trienode.children()[children], mem + children,\r\n valid_words)\r\n return valid_words\r\n # Return all words if prefix is empty string\r\n if prefix == '':\r\n return _find_all(self._root, prefix)\r\n if self.find_node(prefix):\r\n return _find_all(self.find_node(prefix), prefix)\r\n return []",
"def find_path(tree):\n results = []\n for neighbour in tree.neighbours:\n continue\n # TODO\n results.reverse()\n pass",
"def get_path_from_root(self):\n if not self.parent:\n return [self]\n return self.parent.get_path_from_root() + [self]",
"def find_all_paths(parents_to_children, start, end, path=[]):\r\n path = path + [start]\r\n if start == end:\r\n return [path]\r\n if start not in parents_to_children.keys():\r\n return []\r\n paths = []\r\n for node in parents_to_children[start]:\r\n if node not in path:\r\n newpaths = find_all_paths(parents_to_children, node, end, path)\r\n for newpath in newpaths:\r\n paths.append(tuple(newpath))\r\n return paths",
"def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths",
"def path_generator(initial_root):\n for root, dirs, files in os.walk(initial_root):\n paths = [os.path.join(root, name) for name in files]\n return paths",
"def get_node_paths_by_full_object(self, name):\n components = re.split('[\\.:]', name)\n cur_node = self.top\n paths = []\n\n # Handle a case where we may have split things up by wildcard\n if '_' in components[0]:\n (left, right) = components[0].rsplit('_', 1)\n test_name = '{}_*'.format(left.lower())\n if test_name in cur_node.children:\n cur_node = cur_node.children[test_name]\n paths.append(cur_node)\n if len(components) == 1 and components[0][-1] == '*':\n return paths\n\n # Now iterate\n for component in components:\n cur_node = cur_node.children[component.lower()]\n paths.append(cur_node)\n\n # Return the list\n return paths",
"def find_path(self, start):\n path = []\n leaf = start\n seen_nodes = []\n while True:\n if self.nodes[leaf]['address'] == '':\n return path\n\n left = leaf if self.nodes[leaf][\n 'left'] else self.nodes[leaf]['sibling']\n right = leaf if not self.nodes[leaf][\n 'left'] else self.nodes[leaf]['sibling']\n next_hash = do_hash(left + right, self.algo)\n leaf = self.nodes[leaf]['parent']\n assert leaf == next_hash\n assert next_hash not in seen_nodes\n assert next_hash in self.nodes\n step = [left, right, next_hash]\n path.append(step)"
]
| [
"0.5928094",
"0.58579594",
"0.5771403",
"0.572775",
"0.5664198",
"0.56338614",
"0.5608132",
"0.5601973",
"0.55823183",
"0.55173934",
"0.55136275",
"0.55092233",
"0.5503546",
"0.5469566",
"0.544573",
"0.5440878",
"0.5434561",
"0.5434489",
"0.5410268",
"0.5402697",
"0.5397204",
"0.53961766",
"0.53911984",
"0.53657836",
"0.53271145",
"0.5322619",
"0.5315941",
"0.53129727",
"0.5307549",
"0.5301833"
]
| 0.6615518 | 0 |
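Illustrative only: the same root-to-term path enumeration as in the row above, sketched on a toy DAG with plain dicts instead of the GODag/GOTerm objects.

```python
# Each term maps to its parents; a term with no parents is a root.
PARENTS = {"root": [], "A": ["root"], "B": ["root"], "C": ["A", "B"]}

def paths_to_top(term):
    if not PARENTS[term]:                      # reached a root (level 0)
        return [[term]]
    paths = []
    for parent in PARENTS[term]:
        for top_path in paths_to_top(parent):
            paths.append(top_path + [term])    # keep top -> bottom order
    return paths

print(paths_to_top("C"))  # [['root', 'A', 'C'], ['root', 'B', 'C']]
```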
Add the GO parents of a gene's associated GO IDs to the gene's association. | def update_association(self, association):
bad_goids = set()
# Loop through all sets of GO IDs for all genes
for goids in association.values():
parents = set()
# Iterate thru each GO ID in the current gene's association
for goid in goids:
try:
parents.update(self[goid].get_all_parents())
except:
bad_goids.add(goid.strip())
# Add the GO parents of all GO IDs in the current gene's association
goids.update(parents)
if bad_goids:
sys.stdout.write("{N} GO IDs in assc. are not found in the GO-DAG: {GOs}\n".format(
N=len(bad_goids), GOs=" ".join(bad_goids))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addGeneAssociation(self, *args):\n return _libsbml.FbcModelPlugin_addGeneAssociation(self, *args)",
"def add_gene_ids(self, genes_list):\n orig_num_genes = len(self.genes)\n\n for g in list(set(genes_list)):\n if not self.genes.has_id(g):\n new_gene = GenePro(id=g, pdb_file_type=self.pdb_file_type, root_dir=self.genes_dir)\n if self.model:\n self.model.genes.append(new_gene)\n else:\n self.genes.append(new_gene)\n\n log.info('Added {} genes to GEM-PRO project'.format(len(self.genes)-orig_num_genes))",
"def add_parents(self, sample, fitness, max_parent_per_capita=1.0):\n\n assert isinstance(max_parent_per_capita, Number) and 0 <= max_parent_per_capita <= 1.0\n self.size = len(sample)\n max_parent_size = int(max_parent_per_capita * self.size)\n\n probabilities = np.cos(fitness) ** 2\n r = np.random.random(size=self.size)\n parents = sample[r < probabilities]\n\n parent_size = min(parents.shape[0], max_parent_size)\n split = parent_size // 2\n\n self.father = parents[:split]\n self.mother = parents[split: parent_size]",
"def set_parents(self):\n route53 = self.pcf_field.get_particles(flavor=\"route53_record\")\n route53_record_pcf_name = route53.get(\"pcf_name\", self.name)\n ec2_particles = self.pcf_field.get_particles(flavor=\"ec2_instance\")\n\n self.pcf_field.particles[\"route53_record\"][route53_record_pcf_name].parents.update(list(ec2_particles.values()))\n self.pcf_field.link_particles(self.pcf_field.particles)",
"def add_children_to_parents(self, mutated_pop_dict, mating_pop_dict):\n\n print('Combining parent and child generations')\n\n merged_networks_dict = OrderedDict()\n\n for id, G in mutated_pop_dict.items():\n new_id = ''.join(\n [random.choice(string.ascii_letters + string.digits)\n for i in range(10)]\n )\n merged_networks_dict[new_id] = copy.deepcopy(G)\n for id, G in mating_pop_dict.items():\n merged_networks_dict[id] = copy.deepcopy(G)\n\n return merged_networks_dict",
"def _populate_relationships(self, rec_curr):\n for relationship_type, goids in rec_curr.relationship.items():\n parent_recs = set([self[goid] for goid in goids]) \n rec_curr.relationship[relationship_type] = parent_recs # replace GO ID with GO Term record object\n for parent_rec in parent_recs:\n if relationship_type not in parent_rec.relationship_rev:\n parent_rec.relationship_rev[relationship_type] = set([rec_curr])\n else:\n parent_rec.relationship_rev[relationship_type].add(rec_curr)",
"def addGene(self, *args):\n return _libsbml.Association_addGene(self, *args)",
"def update_k_association(self, association, k):\n if k is None:\n self.update_association(association)\n else:\n bad_goids = set()\n # Loop through all sets of GO IDs for all genes\n for goids in association.values():\n parents = set()\n # Iterate thru each GO ID in the current gene's association\n for goid in goids:\n try:\n cur_parents = self[goid]._parents\n for i in range(0, k):\n # Add cur_parents \n parents.update(cur_parents)\n # Update cur_parents so we can operate recursively\n new_cur_parents = set()\n for cur_parent in cur_parents:\n new_cur_parents.update(self[cur_parent]._parents)\n cur_parents = new_cur_parents\n except:\n bad_goids.add(goid.strip())\n # Add the GO parents of all GO IDs in the current gene's association\n goids.update(parents)\n if bad_goids:\n sys.stdout.write(\"{N} GO IDs in assc. are not found in the GO-DAG: {GOs}\\n\".format(\n N=len(bad_goids), GOs=\" \".join(bad_goids)))",
"def getListOfGeneAssociations(self, *args):\n return _libsbml.FbcModelPlugin_getListOfGeneAssociations(self, *args)",
"def add_parents(self, nodes):\n # Check that nodes is a list/tuple of BaseNode objects\n if (isinstance(nodes, (list, tuple)) and\n all([isinstance(node, BaseNode) for node in nodes])):\n for node in nodes:\n self.add_parent(node)\n else:\n raise TypeError('add_parents() is expecting an iterable of '\n 'Job and/or Dagman objects')\n\n return self",
"def link_genes(self, genes: List[Gene]):\n\n # do a double-check to make sure we don't add duplicate genes\n for gene in genes:\n if gene.locus_tag is not None:\n if gene.locus_tag not in [gene.locus_tag for gene in self.genes]:\n self.genes.append(gene)\n gene.link_transcription_unit(self)\n elif gene.id is not None:\n if gene.id not in [gene.id for gene in self.genes]:\n self.genes.append(gene)\n gene.link_transcription_unit(self)",
"def get_parents(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_parents()\n return set_parents",
"def link_genes(self, genes: List[Gene]):\n for gene in genes:\n if gene.locus_tag not in [gene.locus_tag for gene in self.genes]:\n self.genes.append(gene)\n gene.link_i_modulon(self)",
"def update_parents(self):\n for a_parent in self.parents:\n for child in self.children:\n for a_dest in self.children[child]:\n if (a_dest[0] + a_parent.children[self][0][0],\n a_parent.children[self][0][1]) not in a_parent.children[child]:\n a_parent.children[child].append((a_dest[0] + a_parent.children[self][0][0],\n a_parent.children[self][0][1]))\n a_parent.update_parents()",
"def include_parents():\n suffix = uuid4().hex\n\n click.secho('*** Creating Genres for Movie...', fg='green')\n _horror = _make_document('genre', name='Horror - %s' % suffix)\n click.secho(json.dumps(_horror, indent=2, sort_keys=True), fg='yellow')\n\n _monster = _make_document('genre', name='Monster - %s' % suffix, parent=_horror['_id'])\n click.secho(json.dumps(_monster, indent=2, sort_keys=True), fg='yellow')\n\n _vampire = _make_document('genre', name='Vampire - %s' % suffix, parent=_monster['_id'])\n click.secho(json.dumps(_vampire, indent=2, sort_keys=True), fg='yellow')\n\n _werewolf = _make_document('genre', name='Werewolf - %s' % suffix, parent=_monster['_id'])\n click.secho(json.dumps(_werewolf, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Creating Movie with genres `Werewolf` and `Vampire`, parent genres should be auto-filled...', fg='green')\n twilight = _make_document('movie', title='Twilight', genres=[_vampire['_id'], _werewolf['_id']])\n click.secho(json.dumps(twilight, indent=2, sort_keys=True), fg='yellow')",
"def set_parents_table(self) -> None:\n self.parents[\"A\"] = \"start\"\n self.parents[\"B\"] = \"start\"\n self.parents[\"fin\"] = None",
"def get(self, *args):\n return _libsbml.ListOfGeneAssociations_get(self, *args)",
"def find_parents(self) -> None:\n self.referers: Dict[str, List[Har2Tree]] = defaultdict(list)\n for hartree in self.hartrees:\n if hartree.root_referer:\n self.referers[hartree.root_referer].append(hartree)",
"def add_relationship(self, relationship):\n self.relationships[relationship.parent].append(relationship)",
"def getGeneAssociation(self, *args):\n return _libsbml.FbcModelPlugin_getGeneAssociation(self, *args)",
"def createGeneAssociation(self):\n return _libsbml.FbcModelPlugin_createGeneAssociation(self)",
"def link(self, hps):\n for hp in self._parent_hps:\n parent = hps[hp]\n self.parents.add(parent)\n parent.children.add(self)",
"def clone(self):\n return _libsbml.ListOfGeneAssociations_clone(self)",
"def _populate_terms(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n # Make parents and relationships references to the actual GO terms.\n for rec in self.values():\n # Given parent GO IDs, set parent GO Term objects\n rec.parents = set([self[goid] for goid in rec._parents])\n\n # For each parent GO Term object, add it's child GO Term to the children data member\n for parent_rec in rec.parents:\n parent_rec.children.add(rec)\n\n if has_relationship:\n self._populate_relationships(rec)",
"def make_parents(self):\r\n self.parents = []\r\n \r\n for loopindex in range(0, int(self.population_size * 0.6)):\r\n while True:\r\n if loopindex < int(self.population_size * 6 / 15):\r\n parent = random.choice(self.best_districts)\r\n else:\r\n parent = random.choice(self.worst_districts)\r\n \r\n if parent not in self.parents:\r\n self.parents.append(parent)\r\n break",
"def find_parents(G):\n\tpa = {}\n\tfor u in G:\n\t\tfor v in G[u]:\n\t\t\tif not pa.has_key(v):\n\t\t\t\tpa[v] = [u]\n\t\t\telse:\n\t\t\t\tpa[v].append(u)\n\treturn pa",
"def add_mod_interaction_links(self, gene_id):\n xref_dict = {}\n page = 'gene/MODinteractions_genetic'\n\n individual_prefix, individual_body, _ = self.etlh.rdh2.split_identifier(gene_id)\n individual_url = self.etlh.rdh2.return_url_from_identifier(gene_id, page)\n\n # Exception for MGI\n if individual_prefix == 'MGI':\n xref_dict['displayName'] = gene_id\n xref_dict['id'] = gene_id\n xref_dict['globalCrossRefId'] = gene_id\n xref_dict['primaryKey'] = gene_id + page\n else:\n xref_dict['displayName'] = individual_body\n xref_dict['id'] = individual_body\n xref_dict['globalCrossRefId'] = individual_body\n xref_dict['primaryKey'] = individual_body + page\n\n xref_dict['prefix'] = individual_prefix\n xref_dict['localId'] = individual_body\n xref_dict['crossRefCompleteUrl'] = individual_url\n xref_dict['uuid'] = str(uuid.uuid4())\n xref_dict['crossRefType'] = page\n xref_dict['page'] = page\n xref_dict['reference_uuid'] = str(uuid.uuid4())\n\n# For matching to the gene when creating the xref relationship in Neo.\n xref_dict['dataId'] = gene_id\n # Add the gene_id of the identifier to a global list so we don't create unnecessary xrefs.\n self.successful_mod_interaction_xrefs.append(gene_id)\n\n return xref_dict",
"def _cross_parents(self):\n while len(self.children_population) < self.children_count:\n parent_1, parent_2 = random.sample(self.population, k=2)\n self.children_population.extend(self.crossover.cross(parent_1, parent_2))",
"def vcf_add_gene(vrecs,gi_by_chr_map):\n def _overlap(gi,vr):\n return gi['end_i'] >= vr.start and gi['start_i'] <= vr.end\n def _genes_for_vr(vr,gi_by_chr_map):\n cm = gi_by_chr_map[vr.CHROM]\n genes = [ gi['gene'] for gi in cm if _overlap(gi,vr) ] \n return genes\n for vr in vrecs:\n vr.genes = _genes_for_vr(vr,gi_by_chr_map)",
"def find_parents(self):\r\n for i in range(len(self.vertices)):\r\n self.vertices[i].parents = []\r\n for i in range(len(self.vertices)):\r\n for child in self.vertices[i].children:\r\n if i not in self.vertices[child].parents:\r\n self.vertices[child].parents.append(i)"
]
| [
"0.62660384",
"0.6137538",
"0.60173213",
"0.59491855",
"0.5879606",
"0.5706948",
"0.56995",
"0.569761",
"0.56316316",
"0.5612094",
"0.55072606",
"0.5487092",
"0.54207236",
"0.5380436",
"0.53702456",
"0.5355048",
"0.53539014",
"0.5352989",
"0.5327009",
"0.53183895",
"0.5301174",
"0.52955335",
"0.52864933",
"0.52841735",
"0.52801085",
"0.5276887",
"0.52535444",
"0.52500105",
"0.5247897",
"0.5247797"
]
| 0.6832396 | 0 |
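Illustrative only: the core of `update_association` — adding all GO ancestors to each gene's annotation set — shown with a hypothetical precomputed ancestor map in place of the DAG lookup used above.

```python
# Hypothetical ancestor map: GO ID -> set of all parent/ancestor GO IDs.
ALL_PARENTS = {"GO:3": {"GO:1", "GO:2"}, "GO:2": {"GO:1"}, "GO:1": set()}
association = {"geneA": {"GO:3"}, "geneB": {"GO:2"}}

for goids in association.values():
    ancestors = set()
    for goid in goids:
        ancestors.update(ALL_PARENTS.get(goid, set()))
    goids.update(ancestors)

assert association["geneA"] == {"GO:1", "GO:2", "GO:3"}
assert association["geneB"] == {"GO:1", "GO:2"}
```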
Fetches all links to SOTU speeches from the essay page. | def get_links() -> dict:
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate",
}
p = re.compile(r'\d+\.html')
base_url = 'http://stateoftheunion.onetwothree.net/texts/'
essay_url = base_url + 'index.html'
res = requests.get(essay_url, headers=headers)
soup = BeautifulSoup(res.content, 'html')
links = soup.find_all('a')
sotu_links = {link.text: base_url + link.get('href', '') for link in links if re.match(p, link.get('href', ''))}
return sotu_links | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_speeches(sotu_links: dict):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n }\n for name, link in sotu_links.items():\n with open(f'speeches/{slugify(name)}.txt', 'w') as fout:\n fout.write(clean_sotu(link, headers))",
"def handout_links(self):\r\n return self.q(css='section.handouts ol li a').map(lambda el: el.get_attribute('href')).results",
"def scrape_all_songs():\n print('Scraping all songs from {}'.format(URL))\n\n soup = scrapekit.handle_url(URL)\n song_elements = []\n tables = soup.findAll('table')\n\n for t in tables:\n field_index = scrapekit.get_col_index(t, field_name=\"Song\")\n\n if field_index:\n song_elements.extend(scrapekit.scrape_table_col(t, field_index))\n\n links = []\n for element in song_elements:\n l = element.find('a')\n if l:\n links.append(PREFIX + l.attrs.get('href', ''))\n return links",
"def get_subjects_IOP_urls(url):\n # f = open(\"test.txt\", 'a+')\n body = getBody(url)\n\n html = soup(body,'html.parser')\n # print(html.original_encoding)\n div_content = html.find(id=\"content\")\n a_elems = div_content.find_all(\"a\", recursive=True, class_=\"entry-image-post-link\".encode('utf-8'))\n hrefs = []\n for a in a_elems:\n hrefs.append(a[\"href\"])\n return hrefs",
"async def get_article_links(self):\n urls = []\n for page in range(self._start, self._end+1):\n urls.append(self._searchURL + str(page))\n result_list = await self._connect(urls)\n\n self._urls = []\n hares_links = []\n for result in result_list:\n soup = result[1]\n search_links = soup.find_all(class_='search-title')\n article_links = re.findall(r'url=(.*?)\\\"', str(search_links))\n for l in article_links:\n l = unquote(l)\n if 'hare48.pixnet.net' in l:\n hares_links.append(l)\n else:\n self._urls.append(l)\n self._urls.extend(await self._transform_hares(hares_links))",
"def scrape_the_athletic():\n\n r = requests.get(f'{url}/author/james-pearce/', headers=HEADERS).text\n soup = BeautifulSoup(r, 'lxml')\n\n latest_articles = soup.find_all(attrs={\"data-object-type\": \"article\", \"class\": \"col-sm-3\"})\n\n latest_article_links = [latest_article.a['href'] for latest_article in latest_articles]\n\n\n for link in latest_article_links:\n link = f\"{url}{link}\"\n r = requests.get(link, headers=HEADERS).text\n soup = BeautifulSoup(r, 'lxml')\n\n para = extract_paratext(soup)\n text = extract_text(para)\n\n if not text:\n continue\n\n yield f'{text} {link}'",
"def exactor_links(self, response: BeautifulSoup):\n raise NotImplementedError",
"def get_all_grammy_pages():\n res = urllib2.urlopen(GRAMMY_DATA_URL)\n html = res.read()\n\n lines = [line.strip() for line in html.split(\"\\n\") if \"More Winners\" in line]\n urls = [re.search('\\\".*\\\"',line).group(0).replace('\"','') for line in lines]\n return urls",
"def _get_ensemble_apt_urls(self, test=False):\n\n browser = self._browser\n\n cookie = browser.find_element_by_xpath('//*[@id=\"__layout\"]/div/div[3]/div/div[2]/button[1]')\n cookie.click()\n apt_urls = []\n\n try:\n while True:\n time.sleep(5)\n blocks = browser.find_elements_by_xpath(\"//div[@class='listings-card']//script[@type='application/ld+json']\")\n\n for block in blocks:\n jblock = json.loads(block.get_attribute('innerHTML'))\n url = jblock[1]['url']\n apt_urls.append(url)\n btn_next = WebDriverWait(browser, 10).until(\n EC.element_to_be_clickable((By.XPATH, '//*[@id=\"__layout\"]/div/div[2]/main/div/section[2]/div[2]/div/div[2]/div/button[last()]'))\n )\n btn_next.click()\n\n if test:\n self._browser.close()\n break\n except:\n self._browser.close()\n pass\n return apt_urls",
"async def get_all_relevant_subpages(session, main_page):\n url = f'https://{main_page}'\n content = await get_page(session, url)\n\n soup = BeautifulSoup(content, features=\"html.parser\")\n links = [link.get('href') for link in soup.find_all('a', attrs={'href': re.compile(\"^http\")})]\n relevant_links = [link for link in links if main_page in link]\n\n return relevant_links",
"def get_scrapps(self): \n scrapps = []\n self.validate_url()\n soup = self.get_content()\n links = soup.find_all(\"a\")\n table = soup.find_all('div',attrs={\"class\" : \"ph-person-home person-section\"})\n scrapp = Scrapp()\n for tag in table:\n try:\n text = tag.text.replace(\"\\n\",\" \").replace(\"\\r\",\" \").replace(\"\\xa0\",\" \")\n scrapp.add_meta('text',text)\n except KeyError:\n continue\n for link in links:\n try:\n if 'orcid' in link.attrs['href']:\n scrapp.add_meta(\"orcid_link\", link.attrs['href'])\n if \"researcherid\" in link.attrs['href']:\n scrapp.add_meta(\"researchid_link\", link.attrs['href'])\n if \"scholar.google\" in link.attrs['href']:\n scrapp.add_meta(\"googlescholar_link\", link.attrs['href'])\n except KeyError:\n # not all 'a' tags have the links we want\n continue\n scrapps.append(scrapp)\n return scrapps",
"def getPublicEMPDownloadLinks(self):\n try:\n studies = []\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('get_public_emp_studies', [results])\n for row in results:\n # study_id, project_name, file_path, study_abstract\n studies.append((row[0], row[1], row[2], row[3]))\n return studies\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)",
"def tekstowo_artist(url, save):\n page_iterator(url, save, tekstowo_song)",
"def scrape_sections(url):\n driver = open_browser(url)\n scroll_to_bottom(driver, 3)\n table = driver.find_element_by_xpath(\"//table[@class='views-table cols-4']/tbody\")\n entries = table.find_elements_by_tag_name('tr')\n\n grammy_entries = construct_grammy_entries(entries)\n driver.quit()\n \n return grammy_entries",
"def __url_list(self, page):\n url_list = []\n for tag_a in page.find_all('a'):\n href = str(tag_a.get('href'))\n if self.__verify(href):\n url = parse.quote(self.__add_main_site(href), '/:#')\n url_list.append(url)\n return url_list",
"async def _find_links(self, res: aiohttp.ClientResponse) -> Iterator[str]:\n\n content = await res.text()\n soup = BeautifulSoup(content, 'html.parser')\n links = [self._format(res.url, a) for a in soup.find_all('a')]\n return filter(lambda l: l is not None, links)",
"def fetch_content_page(driver, url):\n driver.get(url)\n post_urls = [e.get_attribute('href') for e in driver.find_elements_by_xpath(\"//div[@class='ride_list']/a\")]\n return post_urls",
"def parse_poet_poems(self, response):\n poet_poems_url = response.meta['poet_poems_url']\n\n sresponse = scrapy.Selector(response)\n\n #like the movement pages, this page contains a table that has maximum of ten rows, we need to go to the next\n # page in order to extract all of the poems associated with each poet\n nextpagelink = u''.join(sresponse.xpath('//a[@title = \"Go to next page\"]/@href').extract())\n\n table_poems = sresponse.xpath('//tbody/tr')\n\n #poetry.org does not provide text for all of the poems available, some links are for audio versions only,\n #therefore need to avoid storing poemitems that are not text\n regex = re.compile(r'audio')\n\n for row in table_poems:\n if len(row.xpath('td/a/@href').extract()[0]) > 0 :\n poemlink = u''.join(row.xpath('td/a/@href').extract()[0])\n linktext = str(poemlink)\n if regex.search(linktext) is None:\n if len(row.xpath('td//text()').extract())>0:\n poemitem = PoemItem()\n poemitem['poet_poems_url'] = poet_poems_url\n poemitem['poem_yrpub'] = row.xpath('td//text()').extract()[1]\n poemitem['poem_title'] = row.xpath('td//text()').extract()[4]\n poemitem['poem_link'] = urlparse.urljoin(\"http://www.poets.org\",poemlink)\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",poemlink),\n callback=self.parse_poet_poem, meta={'poemitem': poemitem})\n\n #if more poems on next page, use this method again\n if len(nextpagelink) > 0:\n yield scrapy.Request(url = urlparse.urljoin(\"http://www.poets.org\",nextpagelink),\n callback=self.parse_poet_poems, meta= {'poet_poems_url': poet_poems_url})",
"def get_hyperlinks(url, header, empty_list):\n response = requests.get(url, headers=header)\n soup = BeautifulSoup(response.text, 'html.parser')\n \n# table holds the preview of every job posting\n job_table = soup.find_all('h2', {'mb4 fc-black-800 fs-body3'})\n \n for job in job_table:\n# retrieving every link\n job_link_html = job.find_all('a', href=True)\n \n for job_link in job_link_html:\n empty_list.append('https://stackoverflow.com'+ job_link['href'])\n \n return empty_list",
"def get_all_page(url: str) -> list:\n url_book = get_url_book(url)\n return url_book",
"def _getLinks(self, response, soup):\n links = []\n for anchor in soup.find_all('a'):\n href = anchor.get('href')\n # Convert relative href to full uri\n if href and href.startswith(\"/\"):\n href = response.urljoin(href)\n else:\n continue\n links.append(href)\n return links",
"def get_links(self, soup):\n \"\"\" @param soup: BeautifulSoup object that cointains the targeted links \"\"\"\n \"\"\" @type soup: BeautifulSoup object \"\"\"\n for link in soup.select('a[href^=\"https://\"]'): # All links which have a href element\n href = link.get('href') # The actually href element of the link\n if not any(href.endswith(x) for x in ['.csv', '.xls', '.xlsx']):\n print(\"No excel\")\n continue\n if not href in self.url_queue:\n self.url_queue.append(href) # Add the URL to our queue",
"def parse(self, response):\n for link in response.css(\".event-entry .event-title a::attr(href)\").extract():\n yield scrapy.Request(\n response.urljoin(link), callback=self.parse_event_page, dont_filter=True\n )",
"def _get_apt_urls_per_page(self, soup):\n\n # identify the tag that contains apt URL\n apartments = soup.find_all('div', class_='listing-item__tab-content')\n apt_urls = [apt.find('a')['href'] for apt in apartments]\n # formulate a complete apartment URL\n apt_urls = [f'{CONST.ELLIMAN_HEADER}{url}' for url in apt_urls]\n return apt_urls",
"def get_study_data(self, soup, url):\n pass",
"def puxa_link(soup):\n link = []\n for item in soup.select('.listing-item__title'):\n link.append(item.a.get('href'))\n return link",
"def __TopicsPages(self):\r\n \r\n try:\r\n #apro l'url\r\n pagina=self.__OpenPage(self.__urlYahooCurrentAnswer)\r\n if not self.__MetaOperazioni(pagina): \r\n return False \r\n #gestisco l'eventuale errore di url\r\n if not pagina:\r\n #print 'La ricerca non ha prodotto risultati'\r\n return False\r\n else:\r\n #ora per il numero di risultati che voglio estrarre\r\n #1- estraggo i risultati dalla pagina\r\n #2 estraggo le altre pagine\r\n \r\n indexpages=1\r\n pag=[]\r\n \r\n while True:\r\n #devo iterare tra tutte le pagine fino a che ho i risultati, \r\n #le pagine esisteranno sempre dato che ho impostato il numero di risultati consultabili al max come i \r\n #risultati totali ottenuti\r\n topicrel=pagina.findAll('div',{'class':'dynamic'})\r\n #IN OGNIUNA C HO IL PEZZO DA CUI ESTRARRE LE INFORMAZIONI RELATIVE A N RISP, LINK, CATEGORIA ECC...\r\n for c in topicrel[0].findAll('li'):\r\n #d è una variabile temporanea \r\n \r\n #per prima cosa identifico il tipo in cui è stata strutturata la domanda\r\n #tipo 0: no badge\r\n #tipo 1: badge-o\r\n \r\n asktitle=c.h3.text\r\n askbody=c.span.text\r\n asktitle=asktitle.strip()\r\n askbody=askbody.strip()\r\n #se il corpo della domanda è vuoto lo sostituisco con il titolo\r\n if askbody==u'':askbody=asktitle\r\n \r\n tipo=c.findAll('span',{'class':'badge-o'})\r\n \r\n if tipo==[]: \r\n #print 'tipo 0'\r\n \r\n d=c.findAll('a')\r\n \r\n paginarisposte=d[0]['href']\r\n paginarisposte=unicode(paginarisposte,'UTF-8')\r\n \r\n _url=self.__language+self.__urlYahoo[:-1]\r\n \r\n paginarisposte=_url+paginarisposte\r\n \r\n askcategoria=d[1].text #categoria/e\r\n askcategoria=askcategoria.strip()\r\n askcategorialink=d[1]['href'] #indirizzo categoria\r\n _url=self.__language+self.__urlYahoo[:-1]\r\n \r\n askcategorialink=unicode(askcategorialink,'UTF-8')\r\n askcategorialink=_url+askcategorialink\r\n \r\n if c.find('img',{'class':'img-video-tn'})!=None: #se ha il video\r\n \r\n d=c.findAll('div')\r\n d=d[3].text\r\n d=d.replace(askcategoria,'')\r\n d=d.strip()\r\n d=d.split()\r\n \r\n d=d[0]\r\n numerorisposte=d \r\n numerorisposte=unicode(str(numerorisposte), 'utf-8')\r\n \r\n else:\r\n d=c.findAll('div')\r\n d=d[2].text\r\n d=d.replace(askcategoria,'')\r\n d=d.strip()\r\n d=d.split()\r\n if d=='' or d==u'' or d==[]: #quando non ci sono risposte\r\n d=0 \r\n else:\r\n d=d[0]\r\n \r\n numerorisposte=d \r\n numerorisposte=unicode(str(numerorisposte), 'utf-8')\r\n\r\n else:\r\n #print 'tipo 1'\r\n \r\n d=c.findAll('a')\r\n #d[0]['href'] #indirizzoRisposta\r\n paginarisposte=d[0]['href']\r\n _url=self.__language+self.__urlYahoo[:-1]\r\n paginarisposte=unicode(paginarisposte,'UTF-8')\r\n paginarisposte=_url+paginarisposte \r\n \r\n askcategoria=d[2].text #categoria/e\r\n askcategoria=askcategoria.strip()\r\n askcategorialink=d[2]['href'] #indirizzo categoria\r\n _url=self.__language+self.__urlYahoo[:-1]\r\n askcategorialink=unicode(askcategorialink,'UTF-8')\r\n askcategorialink=_url+askcategorialink\r\n d=c.findAll('div')\r\n \r\n d=d[2].text\r\n d=d.strip()\r\n d=d.split()\r\n \r\n numerorisposte=d[-(len(askcategoria.split())+3)]\r\n numerorisposte=int(numerorisposte)\r\n\r\n numerorisposte=unicode(str(numerorisposte), 'utf-8') \r\n\r\n page={'title':asktitle, 'body':askbody, 'categoria':askcategoria, \\\r\n 'categoria url':askcategorialink,'ask url':paginarisposte, \\\r\n 'risposte':numerorisposte}\r\n pag.append(page)\r\n if len(pag)>int(self.__numRisultati):\r\n self.__topicpages=pag\r\n return pag\r\n \r\n indexpages+=1 \r\n 
urlpage=self.__costruisciUrl(indexpages)\r\n pagina=self.__OpenPage(urlpage)\r\n if not pagina:\r\n return False\r\n except Exception, e:\r\n ErrorLog2.ErrorLog(self.__class__.__name__, 'TopicPages', e)\r\n return False",
"def parse_event_list(self, response):\n for event in response.css(\".view-content .article-title a::attr(href)\"):\n event_url = event.extract()\n yield scrapy.Request(\n response.urljoin(event_url),\n callback=self.parse_event_page,\n dont_filter=True,\n )\n next_url = self._response_next_url(response)\n if next_url:\n yield scrapy.Request(\n response.urljoin(next_url),\n callback=self.parse_event_list,\n dont_filter=True,\n )",
"def test_get_urls(self):\r\n OFFER_URLS = [\"http://olx.pl/offer1\",\r\n \"http://olx.pl/offer2\",\r\n \"http://olx.pl/offer3\",\r\n \"http://olx.pl/offer4\",\r\n \"http://olx.pl/offer5\",\r\n \"http://olx.pl/offer6\"]\r\n\r\n SEARCH_QUERY = \"http://SEARCH_QUERY_URL?\"\r\n \r\n for url in OfferSearcher.search(SEARCH_QUERY, 6, WebDocumentFetcherStub):\r\n self.assertTrue(url in OFFER_URLS, \"Unexpected offer url fetched: %s\" % url)\r\n OFFER_URLS.remove(url)\r\n \r\n self.assertEquals(0, len(OFFER_URLS), \"Not all offer urls fetched: %s\" % OFFER_URLS)",
"def get_speakers(self, request):\n return self.speaker_service.get_speakers()"
]
| [
"0.6096879",
"0.6059175",
"0.6013439",
"0.60043585",
"0.5761647",
"0.570113",
"0.56948954",
"0.565329",
"0.55780137",
"0.55446094",
"0.55062586",
"0.5449724",
"0.5413576",
"0.5397126",
"0.537303",
"0.5372341",
"0.5364773",
"0.5361135",
"0.53572524",
"0.534847",
"0.532454",
"0.5311882",
"0.52996",
"0.5292324",
"0.5269689",
"0.5267778",
"0.5260616",
"0.5257161",
"0.52407026",
"0.5238879"
]
| 0.66971195 | 0 |
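Illustrative only: the anchor-filtering idea from the row above (match hrefs against a numeric `.html` pattern and prefix a base URL), shown on a tiny inline HTML snippet; `example.org` is a placeholder, not the real site.

```python
import re
from bs4 import BeautifulSoup

html = '<a href="19900131.html">1990</a> <a href="index.html">home</a>'
soup = BeautifulSoup(html, "html.parser")
pattern = re.compile(r"\d+\.html")
base_url = "http://example.org/texts/"
links = {a.text: base_url + a["href"]
         for a in soup.find_all("a")
         if pattern.match(a.get("href", ""))}
print(links)  # {'1990': 'http://example.org/texts/19900131.html'}
```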
Downloads all SOTU addresses as TXT files. | def download_speeches(sotu_links: dict):
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate",
}
for name, link in sotu_links.items():
with open(f'speeches/{slugify(name)}.txt', 'w') as fout:
fout.write(clean_sotu(link, headers)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download (url):\n path, url = url\n r = requests.get (url, stream = True)\n content = r.text\n #print (content)\n with open (path + '.txt', 'w') as f:\n f.write (content)",
"def download_all(): #@save\n for name in DATA_HUB:\n download(name)",
"def download(all):\n print(\"Downloading\")",
"def download_files(self) -> None:\n\n for name, url in self.files.items():\n print(f\"Download {name.split('/')[-1]}\")\n wget.download(url, os.path.join(\"data\", name))",
"async def retrieve_tails(self):\n if not self._tails_public_uri:\n raise RevocationError(\"Tails file public URI is empty\")\n\n LOGGER.info(\n \"Downloading the tails file for the revocation registry: %s\",\n self.registry_id,\n )\n\n tails_file_path = Path(self.get_receiving_tails_local_path())\n tails_file_dir = tails_file_path.parent\n if not tails_file_dir.exists():\n tails_file_dir.mkdir(parents=True)\n\n buffer_size = 65536 # should be multiple of 32 bytes for sha256\n file_hasher = hashlib.sha256()\n with open(tails_file_path, \"wb\", buffer_size) as tails_file:\n with Session() as req_session:\n try:\n resp = req_session.get(self._tails_public_uri, stream=True)\n # Should this directly raise an Error?\n if resp.status_code != http.HTTPStatus.OK:\n LOGGER.warning(\n f\"Unexpected status code for tails file: {resp.status_code}\"\n )\n for buf in resp.iter_content(chunk_size=buffer_size):\n tails_file.write(buf)\n file_hasher.update(buf)\n except RequestException as rx:\n raise RevocationError(f\"Error retrieving tails file: {rx}\")\n\n download_tails_hash = base58.b58encode(file_hasher.digest()).decode(\"utf-8\")\n if download_tails_hash != self.tails_hash:\n try:\n os.remove(tails_file_path)\n tails_file_dir.rmdir()\n except OSError as err:\n LOGGER.warning(f\"Could not delete invalid tails file: {err}\")\n\n raise RevocationError(\n \"The hash of the downloaded tails file does not match.\"\n )\n\n self.tails_local_path = str(tails_file_path)\n return self.tails_local_path",
"def download_files(self):",
"def download_all(self):\r\n # Fetch website list\r\n self.fetch_website_list()\r\n\r\n for website in self.website_list:\r\n self.download(website['id'])",
"def download_latest_addresses(self):\n file_id, data = self._gdrive.get_last_address_data_file()\n self._datastore.force_data_storage('address', data)",
"def download_names():\n if not os.path.exists(\"tmp\"):\n os.makedirs(\"tmp\")\n\n filename = \"temp.txt\"\n with open(os.path.join(\"tmp\", filename), \"wb\") as f:\n f.write(\"\\n\".join(app.config[\"generated_names\"]).encode(\"utf-8\"))\n f = open(os.path.join(\"tmp\", filename), \"rb\")\n resp = send_file(f, as_attachment=True, attachment_filename=\"generated_names.txt\",\n cache_timeout=0)\n file_remover.cleanup_once_done(resp, \"tmp\")\n return resp",
"def save_to_file(urls):\n try:\n with open('url.txt', 'w') as file:\n for url in urls:\n file.write(url + \"\\n\")\n except:\n print(\"ERROR SAVING FILE\")",
"def download_all_files(self):\n self.server_handler.get_sensor_data_from_server()",
"def write_downloaded_links():\n global downloaded_links_fn\n text_file = open(downloaded_links_fn,\"w\")\n for link in downloaded_links.items():\n text_file.write(link[0] + \"\\n\")\n text_file.close()",
"def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')",
"def download_report():\n entities = get_names()\n save_csv(entities)",
"def rpc_getaddressutxos(self, addresses: list) -> list:\n return self._call_command([\"getaddressutxos\", {\"addresses\": addresses}])",
"def download_url(url):\n # use url_checker to verify URL is using the full address\n url_name = url_checker(url)\n if url_name:\n print(f'Requesting page {url_name}')\n tstamp = get_tstamp()\n # set the headers like we are a browser\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko)'\n ' Chrome/72.0.3626.109 Safari/537.36'}\n # download the page\n response = requests.get(url, headers=headers)\n\n # create directory for saving file\n URL_DIR_NAME = os.path.join(OUTPUT_DIR, str(url_name))\n URL_TM_DIR_NAME = os.path.join(URL_DIR_NAME, str(tstamp))\n # create directory using url name and timestamp for directories\n ensure_dir(URL_TM_DIR_NAME)\n # save downloaded page as a .txt file\n with open(f'{URL_TM_DIR_NAME}{slash}response.html', 'w') as f:\n print(response.text, file=f)\n # use beautiful soup to extract links\n links = []\n soup = BeautifulSoup(response.text, 'html.parser')\n tags = soup.find_all('a')\n # append links to links list\n for tag in tags:\n links.append(tag.get('href'))\n # get only unique values and sort\n my_set = set(links)\n u_links = list(my_set)\n u_links.sort()\n # save links as a .txt file\n with open(f'{URL_TM_DIR_NAME}{slash}links.txt', 'w') as f:\n for list_item in u_links:\n f.write(f'{list_item}\\n')",
"def download(self):\n return [d.download(ignoreErrors=True) for d in self.parse()]",
"async def scan_UTXOs(sochain_url, network, address):\n utxos = await sochain_api.get_unspent_txs(sochain_url, network, address)\n utxos = list(map(sochain_utxo_to_xchain_utxo, utxos))\n return utxos",
"def download_data():\n urllib.request.urlretrieve('http://cs.iit.edu/~culotta/cs579/a1/edges.txt.gz', 'edges.txt.gz')",
"def downloadAll(self, force=False):\n if self.minutesSinceLastUpdate() == 0 and force == False:\n self.log(\"TOO SOON SINCE LAST DOWNLOAD!\")\n return\n for grabber in self.grabbers:\n self.downloadGrab(grabber[\"url\"], grabber[\"ID\"])+\"\\n\"",
"def download_all(self, to: str = None) -> Generator:\n\n for filename in self.list_files():\n yield (self.download(filename, to))",
"def url_to_file():\n urls = argToList(demisto.getArg('urls'))\n files = []\n for i in range(len(urls)):\n fileEntry = fileResult('url_' + str(i + 1), '[InternetShortcut]\\nURL=' + str(urls[i]))\n files.append(fileEntry)\n demisto.results(files)",
"def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")",
"def Save_Fastas2(UniprotIDs):\r\n file=open(\"../Data/Negative_cases/negative_cases.fasta\",\"w\")\r\n for ID in UniprotIDs:\r\n data=urllib.request.urlopen(\"http://www.uniprot.org/uniprot/%s.fasta\" %ID)\r\n f=data.readlines()\r\n for lines in f:\r\n file.write(str(lines))\r\n #help(data)\r\n file.close()",
"def download_all_data(self) -> None:\n print(\"Download in progress.\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_CHARACTERS\"], \"nextcloud\")\n self.download_data(os.environ[\"NC_TOKEN_TRAIN_FRAGMENTS\"], \"nextcloud\")\n self.download_data(os.environ[\"HABBAKUK_URL\"], \"generic_url\")\n print(\"Download complete!\")",
"def read_url_all(url):\n\n\t\treturn write_file(read_url(url))",
"def _get_download_urls(self):\n path = \"//path/to/text/text()\"\n return list(self.html.xpath(path))",
"def download():\n toydata = requests.get(DATA_URL).json()\n return toydata",
"def get_files_to_download(self):\n\n self.logger.logMsg(\"Getting Files to Download\")\n\n download_links = []\n try:\n with open(self.main_xml) as xml_file:\n data_dict = xmltodict.parse(xml_file.read())\n\n xml_file.close()\n\n for docs in data_dict.get('response').get('result').get('doc'):\n for doc in docs.get('str'):\n\n if doc.get('@name') == 'download_link':\n link = doc.get('#text', None)\n if link is not None:\n download_links.append(link)\n except Exception as e:\n self.logger.logMsg(\"Error Getting Files to Download {}\".format(str(e)))\n raise Exception('Error in Getting Files For Download')\n\n self.logger.logMsg(\"Finished Getting Files to Download\")\n\n return download_links",
"def download(url, save_as):\n\topen(save_as, 'w').write(urllib2.urlopen(url).read())"
]
| [
"0.59053004",
"0.587922",
"0.5870572",
"0.5778265",
"0.57235277",
"0.56239617",
"0.56174624",
"0.55523086",
"0.55497265",
"0.55383825",
"0.5523424",
"0.5518361",
"0.5505957",
"0.5397734",
"0.53777903",
"0.535254",
"0.53073",
"0.5300313",
"0.5297862",
"0.5297141",
"0.5294974",
"0.5254013",
"0.5242746",
"0.52412295",
"0.52265877",
"0.5220013",
"0.51928127",
"0.51790744",
"0.51657677",
"0.5155832"
]
| 0.60333025 | 0 |
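Several of the download helpers listed above (the tails-file retriever in particular) share one pattern: stream the response to disk in fixed-size chunks, hash the bytes as they are written, and discard the file if the digest does not match. A minimal, self-contained sketch of that pattern — the function name, parameters and the use of requests/hashlib here are illustrative assumptions, not code taken from any snippet above:

import hashlib
from pathlib import Path

import requests


def download_with_sha256(url: str, dest: Path, expected_hex: str, chunk_size: int = 65536) -> Path:
    # Stream the body to disk while feeding the same chunks to the hasher.
    dest.parent.mkdir(parents=True, exist_ok=True)
    hasher = hashlib.sha256()
    with requests.get(url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(dest, "wb") as fh:
            for chunk in resp.iter_content(chunk_size=chunk_size):
                fh.write(chunk)
                hasher.update(chunk)
    if hasher.hexdigest() != expected_hex:
        dest.unlink()  # drop the corrupt download before reporting failure
        raise ValueError("downloaded file hash does not match the expected digest")
    return dest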
The immutable annotation cannot be bypassed via synonyms | def test_synonym_annotation(self) -> None:
syna = SynonymAnnotation(col_regular='a', col_immutable='b')
# The columns behave as expected:
assert syna.col_regular == 'a'
assert syna.col_immutable == 'b'
syna.col_regular = 'x'
assert syna.col_regular == 'x'
with pytest.raises(ImmutableColumnError):
syna.col_immutable = 'y'
assert syna.col_immutable == 'b'
with pytest.raises(ImmutableColumnError):
syna.syn_to_immutable = 'y'
assert syna.syn_to_immutable == 'b'
assert syna.col_immutable == 'b' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy_annotations(source: str, target: str) -> str:\n if isinstance(source, AnnotatedStr):\n if not isinstance(target, AnnotatedStr):\n target = AnnotatedStr(target)\n target.optional = source.optional\n target.exists = source.exists\n target.phony = source.phony\n target.precious = source.precious\n return target",
"def setAllowAnnotations(self,value):\n self.PDFreactorConfiguration.in1[\"allowAnnotations\"] = value",
"def replace(self, input_anno: Annotation) -> Tuple[bool, str]:\n raise NotImplementedError",
"def test_synonym(self): \n pass",
"def _convert_annotations(self, ast):\n self.annotations = IDLAnnotations(ast)",
"def test_promote_metadata():\n\n def original(dispatcher, intent):\n \"\"\"Original!\"\"\"\n return 1\n\n original.attr = 1\n wrapped = do(original)\n assert wrapped.__name__ == \"original\"\n assert wrapped.attr == 1\n assert wrapped.__doc__ == \"Original!\"",
"def test_attr_in_annotations() -> None:\n for model in (IdOnly, IdUuid, UuidOnly):\n assert issubclass(model, ModelBase)\n assert (\n 'is_immutable'\n in model.__column_annotations__['immutable'] # type: ignore[attr-defined]\n )\n assert (\n 'is_cached'\n in model.__column_annotations__['cached'] # type: ignore[attr-defined]\n )",
"def policy_alias(self):",
"def setAnnotation(self, *args):\n return _libsbml.SBase_setAnnotation(self, *args)",
"def annotate(m, ss_seq): # -> None:\n ...",
"def get_disambiguator(self):",
"def get_disambiguator(self):",
"def annotate(self, **annotations):\n _check_annotations(annotations)\n self.annotations.update(annotations)",
"def test_base_attrs_in_annotations() -> None:\n for model in (IdOnly, IdUuid, UuidOnly):\n assert issubclass(model, ModelBase)\n for attr in ('created_at', 'id'):\n assert (\n attr\n in model.__column_annotations__[ # type: ignore[attr-defined]\n 'immutable'\n ]\n )\n assert 'uuid' in IdUuid.__column_annotations__['immutable']",
"def __repr__(self):\n return f\"Annotation '{self.text}' at {super().__repr__()}\"",
"def setAnnotation(self, *args):\n return _libsbml.Model_setAnnotation(self, *args)",
"def addSemanticsAnnotation(self, *args):\n return _libsbml.ASTNode_addSemanticsAnnotation(self, *args)",
"def eye_like(self):\n raise NotImplementedError",
"def test_patch_namespaced_role_binding_restriction(self):\n pass",
"def setAnnotation(self, *args):\n return _libsbml.SpeciesReference_setAnnotation(self, *args)",
"def annihilate(cls):\n pass",
"def test_copy(self):\n s = Sequence(\"TTTTTTTTTTAAAA\", name=\"test_copy\")\n annot1 = s.add_annotation(Feature, \"exon\", \"annot1\", [(0, 10)])\n annot2 = s.add_annotation(Feature, \"exon\", \"annot2\", [(10, 14)])\n got = s.copy()\n got_annot1 = got.get_annotations_matching(\n annotation_type=\"exon\", name=\"annot1\"\n )[0]\n got_annot2 = got.get_annotations_matching(\n annotation_type=\"exon\", name=\"annot2\"\n )[0]\n self.assertIsNot(got, s)\n self.assertIsNot(got_annot1, annot1)\n self.assertIsNot(got_annot2, annot2)\n self.assertEqual(got.name, s.name)\n self.assertEqual(got.info, s.info)\n self.assertEqual(got._seq, s._seq)\n self.assertEqual(got.moltype, s.moltype)\n annot1_slice = str(annot1.get_slice())\n annot2_slice = str(annot2.get_slice())\n got1_slice = str(got.annotations[0].get_slice())\n got2_slice = str(got.annotations[1].get_slice())\n self.assertEqual(annot1_slice, got1_slice)\n self.assertEqual(annot2_slice, got2_slice)",
"def __species_annotation__(self,aggregation_so_far,annotation):\n return Survey.__species_annotation__(self,aggregation_so_far,[annotation])",
"def UseAttribute(self) -> bool:",
"def sanitized(self) -> AnnotationWrapper:\n res = self.replace('typing.', ''). \\\n replace('telegrambotapiwrapper.typelib.', ''). \\\n replace(\"<class '\", \"\"). \\\n replace(\"'>\", \"\")\n return AnnotationWrapper(res)",
"def _update_annotation_with_default(anno, name, default):\n # Create instance if is type class\n complete_annotation = anno\n if _is_dsl_type_cls(anno):\n complete_annotation = anno()\n complete_annotation.name = name\n if default is Input._EMPTY:\n return complete_annotation\n if isinstance(complete_annotation, Input):\n # Non-parameter Input has no default attribute\n if complete_annotation._is_parameter_type and complete_annotation.default is not None:\n # logger.warning(\n # f\"Warning: Default value of f{complete_annotation.name!r} is set twice: \"\n # f\"{complete_annotation.default!r} and {default!r}, will use {default!r}\"\n # )\n pass\n complete_annotation._update_default(default)\n return complete_annotation",
"def test_06_add_adminsonly_and_update_annotation_visibility(self):\n\n # Admin creates an annotation only visible to admin\n self.addAnnotation(\"private annotation by admin\", self.user_vm.id, \"VM\", True)\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n annotation_id = self.added_annotations[-1].annotation.id\n\n # Verify users cannot see private annotations created by admins\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNone(\n userVisibleAnnotations,\n \"User must not access admin-only annotations\"\n )\n\n # Admin updates the annotation visibility\n cmd = updateAnnotationVisibility.updateAnnotationVisibilityCmd()\n cmd.id = annotation_id\n cmd.adminsonly = False\n self.apiclient.updateAnnotationVisibility(cmd)\n\n # Verify user can see the annotation after updating its visibility\n cmd = listAnnotations.listAnnotationsCmd()\n cmd.entityid = self.user_vm.id\n cmd.entitytype = \"VM\"\n cmd.annotationfilter = \"all\"\n userVisibleAnnotations = self.userApiClient.listAnnotations(cmd)\n self.assertIsNotNone(\n userVisibleAnnotations,\n \"User must access public annotations\"\n )\n\n # Remove the annotation\n self.removeAnnotation(annotation_id)\n del self.added_annotations[-1]",
"def __str__(self):\n return f\"Annotation '{self.text}' at {super().__repr__()}\"",
"def attr_synonyms(self):\n if self._modifier_exists(DUPLICATED_KEY):\n synonyms = self[CONFIG_KEY][SAMPLE_MODS_KEY][DUPLICATED_KEY]\n _LOGGER.debug(\"Applying synonyms: {}\".format(synonyms))\n for sample in self.samples:\n for attr, new in list(synonyms.items()):\n if attr in sample:\n setattr(sample, new, getattr(sample, attr))",
"def createAnnotation():\n return _libsbml.RDFAnnotationParser_createAnnotation()"
]
| [
"0.59897065",
"0.59733963",
"0.5937297",
"0.58056915",
"0.575385",
"0.5600894",
"0.558813",
"0.55193585",
"0.5471302",
"0.5451327",
"0.54463136",
"0.54463136",
"0.5412678",
"0.54107326",
"0.53659195",
"0.53585386",
"0.5329973",
"0.52853864",
"0.52679074",
"0.5257866",
"0.52498287",
"0.52303004",
"0.5220696",
"0.5204278",
"0.5197066",
"0.5192028",
"0.5190424",
"0.5175901",
"0.51744306",
"0.5171913"
]
| 0.6924903 | 0 |
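The test above asserts that assigning through the synonym raises the same ImmutableColumnError as assigning the column directly, leaving the stored value untouched. As a library-agnostic illustration of why an alias that writes through the guarded attribute cannot bypass the guard, here is a plain-Python sketch — the descriptor and class names below are invented stand-ins, not the classes used in the test:

class ImmutableColumnError(AttributeError):
    pass


class ImmutableOnceSet:
    # Descriptor that accepts exactly one assignment, then rejects changes.
    def __set_name__(self, owner, name):
        self.name = "_" + name

    def __get__(self, obj, objtype=None):
        return getattr(obj, self.name)

    def __set__(self, obj, value):
        if hasattr(obj, self.name):
            raise ImmutableColumnError(f"{self.name[1:]} is immutable")
        setattr(obj, self.name, value)


class Annotated:
    col_immutable = ImmutableOnceSet()

    # The "synonym": an alias that delegates reads and writes to the guarded
    # attribute, so writes through the alias hit the same descriptor.
    syn_to_immutable = property(
        lambda self: self.col_immutable,
        lambda self, value: setattr(self, "col_immutable", value),
    )

    def __init__(self, col_immutable):
        self.col_immutable = col_immutable


a = Annotated("b")
assert a.syn_to_immutable == "b"
try:
    a.syn_to_immutable = "y"   # routed through the same guard as the column
except ImmutableColumnError:
    pass
assert a.col_immutable == "b"  # value unchanged, mirroring the assertions above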
Peer down handler. Cleans up the paths in global tables that was received from this peer. | def on_peer_down(self, peer):
LOG.debug('Cleaning obsolete paths whose source/version: %s/%s',
peer.ip_address, peer.version_num)
# Launch clean-up for each global tables.
self._table_manager.clean_stale_routes(peer) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _bg_clean_up_peer(self, peer):\n with self._peers_lock:\n del self._peers[peer.peer_id]\n del self._sock_to_peer[peer.socket]\n peer.close()\n peer = None",
"def handle_link_down (self, port):\n for dest in self.hosts.keys():\n currPort = self.hosts[dest][0]\n if currPort == port:\n del self.hosts[dest]\n \n deleteDests = set()\n for dest in self.routesToDest:\n currPort = self.routesToDest[dest][0]\n \n if currPort == port:\n\n if dest in self.hosts:\n self.routesToDest[dest] = self.hosts[dest]\n packet = basics.RoutePacket(dest, self.routesToDest[dest][1])\n self.send(packet, self.routesToDest[dest][0], True)\n else:\n self.sendPoison(dest)\n deleteDests.add(dest)\n\n\n for dest in deleteDests:\n del self.routesToDest[dest]\n\n del self.neighbours[port]",
"def cleanup_on_disconnect(self, datapath):\n self.delete_all_flows(datapath)",
"def down(self, connection):\n raise NotImplementedError",
"def Shutdown(self):\n logger.debug(\"Nodeleader shutting down\")\n\n self.stop_peer_check_loop()\n self.peer_check_loop_deferred = None\n\n self.stop_check_bcr_loop()\n self.check_bcr_loop_deferred = None\n\n self.stop_memcheck_loop()\n self.memcheck_loop_deferred = None\n\n self.stop_blockheight_loop()\n self.blockheight_loop_deferred = None\n\n for p in self.Peers:\n p.Disconnect()",
"def ForgetPeersPieces(self,peer):\n self.ComplainIfNoSuchPeer(peer)\n self.peerDatabase[peer].localStorage = 0\n for pieceName in self.peerDatabase[peer].storedData.keys():\n fileName = self.GetNameForPeerFile(pieceName,peer)\n try:\n os.remove(fileName)\n except Exception, e:\n msg = ('WARNING: Could not remove file \"' + fileName +\n '\" because of exception \"' + `e` + '\", continuing.')\n dibs_logger.Logger.PrintAndLog(msg,dibs_logger.LOG_ERROR)\n self.peerDatabase[peer].storedData = {}",
"def __del__(self):\n table_data = open(os.path.join(self.saved_tables_path, self.info_hash + \".obj\"), 'wb')\n for peer_id in self.peers.keys():\n self.peers[peer_id] = self.peers[peer_id].__dict__\n\n table_data.write(json.dumps(self.__dict__))\n table_data.close()",
"def __shut_down(self):\n\n for line, buses in self.__bus_dict.items():\n for bus in buses:\n bus.send_to_bus(\"Server Shut Down\")",
"def tearDown(self):\n logging.debug('tearing down')",
"def tearDown(self):\n logging.debug('tearing down')",
"def _postTearDown(self):\r\n if not core.FW_conf['should_stop'] and \\\r\n (not core.FW_conf['connection'].isFollower() or core.FW_conf['connection'].isFullBlackBox()):\r\n if core.FW_conf['connection'].currentTcId:\r\n if not (core.FW_conf['connection'].isFullBlackBox() or core.FW_conf['connection'].isFollower()):\r\n core.FW_conf['connection']._getCrashDumps()\r\n elif not core.FW_conf['connection'].isFollower():\r\n core.FW_conf['connection']._getCrashDumpsInBlackBox()\r\n\r\n if not core.FW_conf['connection'].isLeader() and \\\r\n not core.FW_conf['connection'].isFollower():\r\n # disconnect TA server(s) and scripting service(s)\r\n for phone in core.FW_conf['connection']:\r\n phone._tab._disconnectServices()\r\n\r\n # get and remove x-files from remote phone(s)\r\n for remote in core.FW_conf['remote_connection']:\r\n resp = self.logApply(remote._fileDumper.extractDumpFiles)\r\n if resp == False:\r\n remote.warn('Getting X-files (in tearDown) failed: %s.' % resp)\r\n\r\n resp = self.logApply(remote._fileDumper.removeDumpFiles)\r\n if resp == False:\r\n remote.warn('Removing X-files(in tearDown) failed: %s.' % resp)",
"def cleanup(self):\n self.msgmap.clear()\n self.droppedmsgs.clear()\n self.chan.stop_receiving_messages()\n\n # TODO: enable\n #self.cmdMap.clear()\n #self.cmdCliSubmitQueue.clear()\n #self.cmdSvrComputeQueue.clear()\n #self.droppedCommands.clear()\n #self.ch.stop_receiving_commands()",
"def postShutdown(self):\r\n self._network.cleanUp()\r\n self._balancer.cleanUp()\r\n self._distributor.cleanUp()",
"def connectionLost(self, reason):\r\n _Protocol.remote_destroy(self)",
"def _post_teardown(self):\n self.delete_connections_indices()\n super()._post_teardown()",
"def _post_teardown(self):\n self.delete_connections_indices()\n super()._post_teardown()",
"def _post_teardown(self):\n self.delete_connections_indices()\n super()._post_teardown()",
"def forget(self):\n self.ingress_tbl.clear()\n self.rootsw_tbl.clear()",
"def tearDown(self) -> None:\n self.inverter.disconnect()\n self.sock.close()",
"def cleanUp(self):\n self.isConnected=False\n self.spawnProc=None",
"def shutdown(self):\n\t\tself._log.info('shutting down DHT')\n\t\tself._threads.shutdown() # Trigger shutdown of maintainance threads\n\t\tself._krpc.shutdown() # Stop listening for incoming connections\n\t\tself._nodes.shutdown()\n\t\tself._threads.join() # Trigger shutdown of maintainance threads",
"def disconnect(self):\n try:\n super(NERDmLoader, self).disconnect()\n finally:\n self.lateloadr._client = None\n self.relloadr._db = None",
"def teardown(cls):\n super(TestUpgradeConnectionLocally, cls).teardown()\n\n os.chdir(cls.cwd)\n try:\n shutil.rmtree(cls.t)\n except (OSError, IOError):\n pass",
"def __del__(self):\n del self.board_\n del self.children_edges_\n self.board_ = None\n del self.parent_edge_\n # print(\"destruct node\")",
"def teardown_class(cls):\n cls.multiplexer_client_1.disconnect()\n cls.multiplexer_client_2.disconnect()\n cls.multiplexer_node.disconnect()\n\n os.chdir(cls.cwd)\n try:\n shutil.rmtree(cls.t)\n except (OSError, IOError):\n pass",
"def teardown_class(cls):\n cls.multiplexer_client_1.disconnect()\n cls.multiplexer_client_2.disconnect()\n cls.multiplexer_node.disconnect()\n\n os.chdir(cls.cwd)\n try:\n shutil.rmtree(cls.t)\n except (OSError, IOError):\n pass",
"def teardown_test(self):\n self.log.info('Tearing down the test case')\n self.iperf_server.stop()\n self.access_point.bridge.teardown(self.brconfigs)\n self.access_point.close()\n wputils.reset_host_interface(self.pkt_sender.interface)\n self.mon.usb('on')",
"def connection_lost(self, reason):\n peer = \"<None>\" if self.remote_vk is None else b64encode(self.remote_vk)\n\n if not self.factory.first_disconnect_logged:\n logging.info(\"NODE: deleting peer {}, reason {}\".format(peer, reason))\n self.factory.first_disconnect_logged = True\n else:\n logging.debug(\"NODE: deleting peer {}, reason {}\".format(peer, reason))\n\n try:\n del self.peers[self.remote_vk]\n except KeyError:\n logging.warning(\"NODE: peer {} already deleted\".format(b64encode(self.remote_vk)))\n\n stop_reactor()",
"def tunnel_down(self, org_cb):\n _log.info(\"storage proxy down\")\n if not self.tunnel:\n return True\n _log.analyze(self.node.id, \"+ CLIENT\", {'tunnel_id': self.tunnel.id})\n self.tunnel = None\n # FIXME assumes that the org_cb is the callback given by storage when starting, can only be called once\n # not future up/down\n if org_cb:\n org_cb(False)\n # We should always return True which sends an ACK on the destruction of the tunnel\n return True",
"def tearDown(self) -> None:\n self.inverter.sock.close()\n self.sock.close()"
]
| [
"0.61131",
"0.6081272",
"0.58917975",
"0.5844231",
"0.58277273",
"0.57862747",
"0.5777576",
"0.5760708",
"0.5760111",
"0.5760111",
"0.5746705",
"0.5727138",
"0.56835914",
"0.5668566",
"0.56587297",
"0.56587297",
"0.56587297",
"0.5655229",
"0.56371796",
"0.56203717",
"0.56137556",
"0.5582284",
"0.55577487",
"0.5547298",
"0.55189866",
"0.55189866",
"0.5517053",
"0.5515273",
"0.54983306",
"0.54851604"
]
| 0.794418 | 0 |
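The handler above only logs and delegates; the real work is whatever the table manager does per global table. A minimal sketch, with invented names, of a cleanup that drops every path whose source is the downed peer:

from typing import Dict, List, Tuple


class ToyTableManager:
    # Maps a route-family name to a list of (prefix, source_peer_ip) paths.
    def __init__(self) -> None:
        self.tables: Dict[str, List[Tuple[str, str]]] = {"ipv4": [], "vpnv4": []}

    def clean_stale_routes(self, peer_ip: str) -> int:
        removed = 0
        for family, paths in self.tables.items():
            kept = [path for path in paths if path[1] != peer_ip]
            removed += len(paths) - len(kept)
            self.tables[family] = kept
        return removed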
Returns list of peers in established state. | def get_peers_in_established(self):
est_peers = []
for peer in self._peers.values():
if peer.in_established:
est_peers.append(peer)
return est_peers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def peers():\n return flask.jsonify(api_utils.get_peer_conf_and_state())",
"def peer_list_active(self):\n return self.client.call('GET', self.name + 'peer-list/active')",
"def peer_list_reachable(self):\n return self.client.call('GET', self.name + 'peer-list/reachable')",
"def get_peer_nodes(self):\n return list(self.__peer_nodes)",
"def get_peer_nodes(self):\n return list(self.__peer_nodes)",
"def peer_list_all(self):\n return self.client.call('GET', self.name + 'peer-list/all')",
"async def peers() -> dict:\n ips = [peer.ip for peer in chain.peers]\n return {\"peers\": ips}",
"def peers(self):\n\n peers_data = ''\n for peer in getattr(self._peer, 'peers', []):\n peers_data += peer.config().remote_config\n return peers_data",
"def peers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"peers\")",
"def render_peers(self):\n return [peer.jsonify() for peer in self.peers]",
"def getConnectedPeers(self, peerType):\r\n raise NotImplementedError()",
"def bgp_peers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BgpPeerArgs']]]]:\n return pulumi.get(self, \"bgp_peers\")",
"def get_all_resources(self) -> typing.List:\n\n session = self.session()\n\n try:\n available_peers = session\\\n .query(\n ResourceTable.peerIp,\n ResourceTable.peerPort,\n ResourceTable.resourcePath,\n ResourceTable.resourceName,\n ResourceTable.resourceHash\n )\\\n .group_by(ResourceTable.peerId, ResourceTable.resourceHash)\\\n .all()\n\n return available_peers\n\n finally:\n session.close()",
"def list_conns(self):\n\t\tres = []\n\t\tself.AL.acquire()\n\t\tfor ls in self.ls.keys():\n\t\t\tinfo = self.ls[ls]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Relay\", \"LOCAL\", info[\"local\"], info[\"peer\"],\n\t\t\t\t\tinfo[\"port\"], info[\"got\"], None,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tfor s in self.s2i.keys():\n\t\t\tinfo = self.s2i[s]\n\t\t\tif info[\"creator\"] == self.cid:\n\t\t\t\tfai = \"LOCAL\"\n\t\t\t\ttai = info[\"peer\"]\n\t\t\telse:\n\t\t\t\tfai = info[\"creator\"]\n\t\t\t\ttai = info[\"peer\"]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Conn\", fai, info[\"local\"], tai, info[\"port\"],\n\t\t\t\t\tinfo[\"recv\"], info[\"send\"]\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tself.AL.release()\n\t\treturn res",
"def complete_list_of_states():\n # funny way of getting all the states that are defined in ConcertClientState.msg\n return concert_msgs.ConductorGraph.__slots__",
"def get_candidate_list(self):\n return self.candidate_list",
"def vertices(self):\n return self._outgoing.keys()",
"def test_all_peer_status(self):\n statuses = self.pybird.get_peer_status()\n\n self.assertEquals(statuses[0]['name'], \"PS1\")\n self.assertEquals(statuses[0]['state'], \"Passive\")\n self.assertEquals(statuses[1]['name'], \"PS2\")\n self.assertEquals(statuses[1]['state'], \"Established\")",
"def _get_hosts_from_state(state):\n active_nodes = set()\n for shard, shard_data in state.get('shards', {}).items():\n replicas = shard_data['replicas']\n for replica, replica_data in replicas.items():\n if replica_data['state'] == 'active':\n active_nodes.add(replica_data['base_url'])\n\n return active_nodes",
"def state_addresses(self):\n return self.switch.state_addresses()",
"def bgp_peers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceLoadBalancerBgpPeerArgs']]]]:\n return pulumi.get(self, \"bgp_peers\")",
"def edges_list(self):\n return self._edges_list",
"def getConnectedUsers(self):\n\n\t\treturn self.connectedUsers",
"def GetPassivePeers(self):\n \n passivePeers = filter(lambda x: self.peerDatabase[x].listen=='passive',\n self.peerDatabase.keys())\n return map(lambda p: (p,self.peerDatabase[p]),passivePeers)",
"def peer_ipv4_addresses(self) -> Dict[str, List[IPv4Address]]:\n return self._get_ipv4_addresses(\"peer\")",
"def get_peers(self):\n self.peers = []\n retriever_methods = [\n m\n for m in rtorrent9.peer.methods\n if m.is_retriever() and m.is_available(self._rt_obj)\n ]\n # need to leave 2nd arg empty (dunno why)\n m = rtorrent9.rpc.Multicall(self)\n m.add(\n \"p.multicall\",\n self.info_hash,\n \"\",\n *[method.rpc_call + \"=\" for method in retriever_methods]\n )\n\n results = m.call()[0] # only sent one call, only need first result\n\n for result in results:\n results_dict = {}\n # build results_dict\n for m, r in zip(retriever_methods, result):\n results_dict[m.varname] = rtorrent9.rpc.process_result(m, r)\n\n self.peers.append(Peer(self._rt_obj, self.info_hash, **results_dict))\n\n return self.peers",
"def get_visited_nodes(self):\n return self.visited_nodes",
"def discover_peers():\n # TODO: Disable this function if peer discoverability is disabled in config\n\n peer_manager = load_plugin(\"chain.plugins.peers\")\n peers = peer_manager.peers()\n # Shuffle peers so we always get the peers from the different peers at the start\n random.shuffle(peers)\n for index, peer in enumerate(peers):\n his_peers = peer.fetch_peers()\n for his_peer in his_peers:\n add_peer(\n ip=his_peer.ip,\n port=his_peer.port,\n chain_version=his_peer.chain_version,\n nethash=his_peer.nethash,\n os=his_peer.os,\n )\n\n # Always get peers from at least 4 sources. As add_peer is async,\n # `has_minimum_peers` might actually return wrong result, but that will only\n # increase the number of peers we have.\n if index >= 4 and peer_manager.has_minimum_peers():\n break\n\n reverify_all_peers()",
"def getVertices(self):\n return list(self.adjList.keys())",
"def E(self) -> list:\n res = []\n for v in self.V():\n res.extend([(v.name, i) for i in v.get_connections().keys()])\n return res"
]
| [
"0.76069987",
"0.7319591",
"0.7274298",
"0.71916944",
"0.71916944",
"0.7083651",
"0.6903163",
"0.6896505",
"0.62460506",
"0.5997698",
"0.5910318",
"0.5833534",
"0.5781555",
"0.57754815",
"0.57457167",
"0.5735629",
"0.56861264",
"0.56781614",
"0.56751245",
"0.5670917",
"0.56516445",
"0.5639862",
"0.56255925",
"0.5602655",
"0.5564519",
"0.556236",
"0.5536089",
"0.55293393",
"0.5523397",
"0.55167437"
]
| 0.83943206 | 0 |
For given `peer` resend sent paths. | def resend_sent(self, route_family, peer):
if peer not in self._peers.values():
raise ValueError('Could not find given peer (%s)' % peer)
if route_family not in SUPPORTED_GLOBAL_RF:
raise ValueError(
'Given route family (%s) is not supported.' % route_family
)
# Iterate over the global table for given afi, safi and enqueue
# out-going routes.
table = self._table_manager.get_global_table_by_route_family(
route_family
)
for destination in table.values():
# Check if this destination's sent - routes include this peer.
# i.e. check if this destinations was advertised and enqueue
# the path only if it was. If the current best-path has not been
# advertised before, it might already have a OutgoingRoute queued
# to be sent to the peer.
sent_routes = destination.sent_routes
if sent_routes is None or len(sent_routes) == 0:
continue
for sent_route in sent_routes:
if sent_route.sent_peer == peer:
# update med - if previously med was set per neighbor or
# wasn't set at all now it could have changed and we may
# need to set new value there
p = sent_route.path
if p.med_set_by_target_neighbor or p.get_pattr(
BGP_ATTR_TYPE_MULTI_EXIT_DISC) is None:
sent_route.path = \
clone_path_and_update_med_for_target_neighbor(
sent_route.path, peer.med
)
ogr = OutgoingRoute(sent_route.path,
for_route_refresh=True)
peer.enque_outgoing_msg(ogr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comm_all_best_paths(self, peer):\n LOG.debug('Communicating current best path for all afi/safi except'\n ' 1/132')\n # We will enqueue best path from all global destination.\n for route_family, table in self._table_manager.iter:\n if route_family == RF_RTC_UC:\n continue\n if peer.is_mbgp_cap_valid(route_family):\n for dest in table.values():\n if dest.best_path:\n peer.communicate_path(dest.best_path)",
"def PeerForgotUs(self,peer):\n\n filesAffected = []\n self.peerDatabase[peer].ZeroRemoteStorage()\n for fileName in self.fileDatabase.keys():\n fileRecord = self.fileDatabase[fileName]\n lost = fileRecord.MarkPiecesFromPeerAsLost(peer)\n if (lost):\n filesAffected.append(fileName)\n return filesAffected",
"def rtg_finish_path(self):\n path_slice = slice(self.path_start_idx, self.ptr)\n ep_rews = self.rew_buf[path_slice]\n ep_ret = utils.reward_to_go(ep_rews)\n self.ret_buf[path_slice] = ep_ret\n self.adv_buf[path_slice] = ep_ret",
"def send_path(self, path):\n self.clear_path()\n for coordinate in path:\n self.send_coordinate(coordinate)\n time.sleep(0.05)",
"def on_peer_down(self, peer):\n LOG.debug('Cleaning obsolete paths whose source/version: %s/%s',\n peer.ip_address, peer.version_num)\n # Launch clean-up for each global tables.\n self._table_manager.clean_stale_routes(peer)",
"def resend_email(self, userdict):\n return self.post('resend', userdict)",
"def ForgetPeersPieces(self,peer):\n self.ComplainIfNoSuchPeer(peer)\n self.peerDatabase[peer].localStorage = 0\n for pieceName in self.peerDatabase[peer].storedData.keys():\n fileName = self.GetNameForPeerFile(pieceName,peer)\n try:\n os.remove(fileName)\n except Exception, e:\n msg = ('WARNING: Could not remove file \"' + fileName +\n '\" because of exception \"' + `e` + '\", continuing.')\n dibs_logger.Logger.PrintAndLog(msg,dibs_logger.LOG_ERROR)\n self.peerDatabase[peer].storedData = {}",
"def send_route_refresh(peer_ip):\n LOG.debug('Try to send route refresh to peer %s', peer_ip)\n json_request = flask.request.get_json()\n if 'afi' in json_request and 'safi' in json_request:\n if 'res' not in json_request:\n res = 0\n else:\n res = json_request['res']\n result = api_utils.send_route_refresh(\n peer_ip=peer_ip, afi=json_request['afi'], safi=json_request['safi'], res=res)\n return flask.jsonify(result)\n return flask.jsonify({\n 'status': False,\n 'code': 'please check your post data'\n })",
"def send_file_directory(weak_peer_socket):\n\n try:\n send_message(weak_peer_socket, \"\", json.dumps(global_peer_files))\n except:\n # client closed connection, violently or by user\n return False",
"def handle_follow_path(mqtt_client):\n print('sending the follow path message')\n mqtt_client.send_message('follow_path')",
"def _send_response(self, result, peer):\n try:\n response = json.dumps(result).encode()\n self._socket.sendto(response, peer)\n except (ConnectionRefusedError, FileNotFoundError, PermissionError,\n TypeError):\n pass",
"def adv_path_finish(self):\n path_slice = slice(self.path_start_idx, self.ptr)\n ep_rews = self.rew_buf[path_slice]\n ep_vals = self.val_buf[path_slice]\n ep_ret = utils.reward_to_go(ep_rews)\n self.ret_buf[path_slice] = ep_ret\n self.adv_buf[path_slice] = ep_ret - ep_vals",
"def handle_revert_device(self, peer, sender, bus, topic, headers, message):\n point = topic.replace(topics.ACTUATOR_REVERT_DEVICE() + '/', '', 1)\n requester = sender\n headers = self._get_headers(requester)\n\n try:\n self._revert_device(requester, point)\n except RemoteError as ex:\n self._handle_remote_error(ex, point, headers)\n except StandardError as ex:\n self._handle_standard_error(ex, point, headers)",
"def handle_revert_point(self, peer, sender, bus, topic, headers, message):\n point = topic.replace(topics.ACTUATOR_REVERT_POINT() + '/', '', 1)\n requester = sender\n headers = self._get_headers(requester)\n\n try:\n self._revert_point(requester, point)\n except RemoteError as ex:\n self._handle_remote_error(ex, point, headers)\n except StandardError as ex:\n self._handle_standard_error(ex, point, headers)",
"def _collect_peers_of_interest(self, new_best_path):\n path_rts = new_best_path.get_rts()\n qualified_peers = set(self._peers.values())\n\n # Filter out peers based on RTC_AS setting if path is for RT_NLRI\n qualified_peers = self._rt_manager.filter_by_origin_as(\n new_best_path, qualified_peers\n )\n\n # We continue to filter out qualified peer based on path RTs\n # If new best path has RTs, we need to share this UPDATE with\n # qualifying peers\n if path_rts:\n # We add Default_RTC_NLRI to path RTs so that we can send it to\n # peers that have expressed interest in all paths\n path_rts.append(RouteTargetMembershipNLRI.DEFAULT_RT)\n # All peers that do not have RTC capability qualify\n qualified_peers = set(self._get_non_rtc_peers())\n # Peers that have RTC capability and have common RT with the path\n # also qualify\n peer_to_rtfilter_map = self._peer_to_rtfilter_map\n for peer, rt_filter in peer_to_rtfilter_map.items():\n # Ignore Network Controller (its not a BGP peer)\n if peer is None:\n continue\n\n if rt_filter is None:\n qualified_peers.add(peer)\n elif rt_filter.intersection(path_rts):\n qualified_peers.add(peer)\n\n return qualified_peers",
"def _send_recv(self,package,addendum=None):\n self._send(package,addendum)\n self.last_results = self._recv()\n if(len(self.send_exc) or len(self.recv_exc)):\n self.handle_error()\n return self.last_results",
"def _peer_url(self, path):\r\n return \"http://127.0.0.1:{port}/peer_grading/{path}/\".format(\r\n port=self.server.port, path=path\r\n )",
"def route_rejected(self, prefix, next_hop, as_path):",
"async def store_peers(self, peer: Peer):\n await self.peers.store(peer)",
"def comm_all_rt_nlris(self, peer):\n # First check if for this peer mpbgp-rtc is valid.\n if not peer.is_mbgp_cap_valid(RF_RTC_UC):\n return\n\n neigh_conf = self._neighbors_conf.get_neighbor_conf(peer.ip_address)\n peer_rtc_as = neigh_conf.rtc_as\n # Iterate over all RT_NLRI destination communicate qualifying RT_NLRIs\n rtc_table = self._table_manager.get_rtc_table()\n for dest in rtc_table.values():\n best_path = dest.best_path\n # Ignore a destination that currently does not have best path\n if not best_path:\n continue\n\n # If this is a local path\n if best_path.source is None:\n # Check RT NLRI's origin AS matches peer RTC_AS setting\n origin_as = best_path.nlri.origin_as\n if origin_as == peer_rtc_as:\n peer.communicate_path(best_path)\n else:\n # Communicate all remote RT NLRIs\n peer.communicate_path(best_path)\n\n # Also communicate EOR as per RFC\n peer.enque_end_of_rib(RF_RTC_UC)",
"def method_others(self, method, path, protocol):\n path = path[len('http://'):]\n host, _, path = path.partition('/')\n path = '/{}'.format(path)\n self._connect_to_target(host)\n self.target.send('{method} {path} {protocol}\\n{client_buffer}'.format(\n method=method,\n path=path,\n protocol=protocol,\n client_buffer=self.client_buffer))\n self.client_buffer = ''\n self._read_write()",
"def relay(self, line):\n if not self.recv:\n self.recv = True\n self.sendingto = list(self.factory.getClients())\n for client in self.sendingto:\n if client != self:\n client.sendString(line)",
"def setPeer (self, peer):\n\t\tself.peer = peer",
"def done_sending(self):\r\n self._flush(True)",
"def relay_query_or_read_path(self):\n parsed_path = urlparse.urlparse(self.path)\n if hasattr(parsed_path, 'query'):\n # Python 2.5\n query = parsed_path.query\n path = parsed_path.path\n else:\n # Python 2.4\n scheme, netloc, path, parameters, query, fragment = parsed_path\n if query:\n message = relay(self, query)\n else:\n message = read_path(self, path)\n self.end_headers()\n self.wfile.write(message)",
"def test_peers_peerid_delete(self):\n pass",
"def _resend_subscriptions_and_strategies(self):\n for req in self._ws_jsonrpc_cache:\n self._logger.info('Resending JSONRPCRequest %s', req)\n result = yield self._send(req)\n self._logger.info('Resent JSONRPCRequest, with result: %s', result)",
"async def reroute(self, engine, outref, argrefs):\n return await self.subinf.reroute(engine, outref, argrefs)",
"def reconnect_peer(self, peer):\n if peer.next_heart_beat_time < self.app.tick:\n self.connect_peer(peer, first=False)\n else:\n logger.log(TRACE, \"Reconnect time for %s will be at %r, now is %r\",\n peer, peer.next_heart_beat_time, self.app.tick)",
"def test_peers_peerid_post(self):\n pass"
]
| [
"0.55400974",
"0.5527805",
"0.5385521",
"0.5316282",
"0.52486527",
"0.5176147",
"0.51740795",
"0.5169025",
"0.5047884",
"0.49399987",
"0.4920014",
"0.49139652",
"0.48995343",
"0.48900852",
"0.48659205",
"0.48195505",
"0.47898737",
"0.47351354",
"0.47132412",
"0.47109663",
"0.47004312",
"0.46618614",
"0.4653329",
"0.4617271",
"0.45754126",
"0.45374444",
"0.4528718",
"0.45238397",
"0.45224875",
"0.45067766"
]
| 0.69251496 | 0 |
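A stripped-down sketch of the filter above: only routes previously sent to the given peer are re-emitted for the refresh, and the per-neighbor MED is re-applied only when the stored path carries none. The dataclass and field names are invented for the example:

from dataclasses import dataclass, replace
from typing import Iterable, Iterator, Optional


@dataclass(frozen=True)
class SentRoute:
    peer_ip: str         # which neighbor this path was advertised to
    prefix: str
    med: Optional[int]   # None means the stored path has no MED attached


def outgoing_for_refresh(sent_routes: Iterable[SentRoute],
                         peer_ip: str,
                         peer_med: Optional[int]) -> Iterator[SentRoute]:
    for route in sent_routes:
        if route.peer_ip != peer_ip:
            continue
        if route.med is None and peer_med is not None:
            route = replace(route, med=peer_med)  # clone with the neighbor's MED
        yield route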
Makes refresh request to all peers for given address family. Skips making request to peer that have valid RTC capability. | def req_rr_to_non_rtc_peers(self, route_family):
assert route_family != RF_RTC_UC
for peer in self._peers.values():
# First check if peer is in established state
if (peer.in_established and
# Check if peer has valid capability for given address
# family
peer.is_mbgp_cap_valid(route_family) and
# Check if peer has valid capability for RTC
not peer.is_mbgp_cap_valid(RF_RTC_UC)):
peer.request_route_refresh(route_family) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_route_refresh_request(self, peer_ip, *route_families):\n LOG.debug('Route refresh requested for peer %s and route families %s',\n peer_ip, route_families)\n if not SUPPORTED_GLOBAL_RF.intersection(route_families):\n raise ValueError('Given route family(s) % is not supported.' %\n route_families)\n\n peer_list = []\n # If route-refresh is requested for all peers.\n if peer_ip == 'all':\n peer_list.extend(self.get_peers_in_established())\n else:\n given_peer = self._peers.get(peer_ip)\n if not given_peer:\n raise ValueError('Invalid/unrecognized peer %s' % peer_ip)\n if not given_peer.in_established:\n raise ValueError('Peer currently do not have established'\n ' session.')\n peer_list.append(given_peer)\n\n # Make route refresh request to valid peers.\n for peer in peer_list:\n peer.request_route_refresh(*route_families)\n\n return True",
"def _refresh_discovery(self):\n if self.terminate_flag:\n return\n\n self.devices = discover_drones(self.ip_range, self.skyserve_port)\n time.sleep(self.refresh_interval / 1000)\n self._refresh_discovery()",
"def _collect_peers_of_interest(self, new_best_path):\n path_rts = new_best_path.get_rts()\n qualified_peers = set(self._peers.values())\n\n # Filter out peers based on RTC_AS setting if path is for RT_NLRI\n qualified_peers = self._rt_manager.filter_by_origin_as(\n new_best_path, qualified_peers\n )\n\n # We continue to filter out qualified peer based on path RTs\n # If new best path has RTs, we need to share this UPDATE with\n # qualifying peers\n if path_rts:\n # We add Default_RTC_NLRI to path RTs so that we can send it to\n # peers that have expressed interest in all paths\n path_rts.append(RouteTargetMembershipNLRI.DEFAULT_RT)\n # All peers that do not have RTC capability qualify\n qualified_peers = set(self._get_non_rtc_peers())\n # Peers that have RTC capability and have common RT with the path\n # also qualify\n peer_to_rtfilter_map = self._peer_to_rtfilter_map\n for peer, rt_filter in peer_to_rtfilter_map.items():\n # Ignore Network Controller (its not a BGP peer)\n if peer is None:\n continue\n\n if rt_filter is None:\n qualified_peers.add(peer)\n elif rt_filter.intersection(path_rts):\n qualified_peers.add(peer)\n\n return qualified_peers",
"def apply_peers(\n peers: Iterable[Peer],\n name: str,\n namespace: Union[None, str],\n legacy: bool = False,\n):\n patch = {'status': {peer.id: None if peer.is_dead else peer.as_dict() for peer in peers}}\n resource = (LEGACY_PEERING_RESOURCE if legacy else\n CLUSTER_PEERING_RESOURCE if namespace is None else\n NAMESPACED_PEERING_RESOURCE)\n patching.patch_obj(resource=resource, namespace=namespace, name=name, patch=patch)",
"async def refresh(self):\n while True:\n await asyncio.sleep(5/6 * self.lifetime)\n\n request = stun.Message(message_method=stun.Method.REFRESH,\n message_class=stun.Class.REQUEST)\n request.attributes['LIFETIME'] = self.lifetime\n self.__add_authentication(request)\n await self.request(request, self.server)",
"def send_route_refresh(peer_ip):\n LOG.debug('Try to send route refresh to peer %s', peer_ip)\n json_request = flask.request.get_json()\n if 'afi' in json_request and 'safi' in json_request:\n if 'res' not in json_request:\n res = 0\n else:\n res = json_request['res']\n result = api_utils.send_route_refresh(\n peer_ip=peer_ip, afi=json_request['afi'], safi=json_request['safi'], res=res)\n return flask.jsonify(result)\n return flask.jsonify({\n 'status': False,\n 'code': 'please check your post data'\n })",
"def discover_peers():\n # TODO: Disable this function if peer discoverability is disabled in config\n\n peer_manager = load_plugin(\"chain.plugins.peers\")\n peers = peer_manager.peers()\n # Shuffle peers so we always get the peers from the different peers at the start\n random.shuffle(peers)\n for index, peer in enumerate(peers):\n his_peers = peer.fetch_peers()\n for his_peer in his_peers:\n add_peer(\n ip=his_peer.ip,\n port=his_peer.port,\n chain_version=his_peer.chain_version,\n nethash=his_peer.nethash,\n os=his_peer.os,\n )\n\n # Always get peers from at least 4 sources. As add_peer is async,\n # `has_minimum_peers` might actually return wrong result, but that will only\n # increase the number of peers we have.\n if index >= 4 and peer_manager.has_minimum_peers():\n break\n\n reverify_all_peers()",
"async def _match_urgent_node_requests_to_peers(self) -> None:\n while self.manager.is_running:\n urgent_batch_id, urgent_hashes = await self._node_tasks.get(\n eth_constants.MAX_STATE_FETCH\n )\n\n # Get best peer, by GetNodeData speed\n queen = await self._queen_tracker.get_queen_peer()\n\n queen_is_requesting = queen.eth_api.get_node_data.is_requesting\n\n if queen_is_requesting:\n # Our best peer for node data has an in-flight GetNodeData request\n # Probably, backfill is asking this peer for data\n # This is right in the critical path, so we'd prefer this never happen\n self.logger.debug(\n \"Want to download urgent data, but %s is locked on other request\",\n queen,\n )\n # Don't do anything different, allow the request lock to handle the situation\n\n self._num_urgent_requests_by_peer[queen] += 1\n self._urgent_requests += 1\n\n await self._find_urgent_nodes(\n queen,\n urgent_hashes,\n urgent_batch_id,\n )",
"def test_peers_get(self):\n pass",
"async def peers_keepalive(\n *,\n ourselves: Peer,\n):\n try:\n while True:\n logger.debug(f\"Peering keep-alive update for {ourselves.id} (priority {ourselves.priority})\")\n ourselves.keepalive()\n\n # How often do we update. Keep limited to avoid k8s api flooding.\n # Should be slightly less than the lifetime, enough for a patch request to finish.\n await asyncio.sleep(max(1, int(ourselves.lifetime.total_seconds() - 10)))\n finally:\n try:\n ourselves.disappear()\n except:\n pass",
"def refresh_list(self):\n if self._dominfo_lock.acquire(False):\n try:\n return self._refresh_list()\n finally:\n self._dominfo_lock.release()\n else:\n # wait until the refresh done by the other party is complete\n with self._dominfo_lock:\n pass",
"def Broadcast(self, method, *args, **kwargs):\n for peer_id, (host, port, peer) in self.peers.iteritems():\n logging.debug('Calling method %r on peer %r.' % (method, peer_id))\n m = getattr(peer, method)\n m(self.peer_id, *args, **kwargs)",
"async def refresh_schedules(self, now=None):\n tasks = [charger.schedules_async_refresh() for charger in self.chargers_data]\n if tasks:\n await asyncio.wait(tasks)\n self.update_ha_state()",
"def _handle_tracker_contact(self, response):\n peers = response['peers']\n self._try_peers(peers)",
"async def _refresh(self):\n\n # loop.run_in_excutor makes PyiCloud asynchronous, right??\n loop = asyncio.get_event_loop()\n iphone = await loop.run_in_executor(None, self.iphone_api.iphone.location)\n self.iphone = (iphone[\"longitude\"], iphone[\"latitude\"])\n\n await loop.run_in_executor(None, self.fmf_api.refresh_client)\n friend = self.fmf_api.location_of(self.friend_id)\n\n # If able to receive the location of the friend\n if friend:\n self.count = int()\n self.friend = (friend[\"longitude\"], friend[\"latitude\"])\n \n self.coordinates.update(self.iphone, self.friend)\n # Otherwise, wait\n else:\n if self.count == 4:\n print(f\"Unable to find {self.friend_email}\")\n await asyncio.sleep(5)\n self.count += 1\n await self._refresh()",
"def start_peers(self):\n for i in self.nodes:\n i.start()",
"def update(self):\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind(('', DISCOVERY_PORT))\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.settimeout(DISCOVERY_TIMEOUT.seconds)\n\n # send query to every device in every network connected to\n LOG.debug('querying hosts on networks: %s', self.networks)\n for network in self.networks:\n for address in network.hosts():\n try:\n sock.sendto(DISCOVERY_PAYLOAD,\n (str(address), DISCOVERY_PORT))\n except OSError as exc:\n LOG.debug('failed to send request', exc_info=exc)\n continue\n\n # wait for responses\n while True:\n try:\n data, _ = sock.recvfrom(64)\n except socket.timeout:\n # no (more) responses received\n break\n\n # skip our own outgoing packet\n if data == DISCOVERY_PAYLOAD:\n continue\n\n # data = ip_address,id,model\n data = data.decode('ascii').split(',')\n if len(data) < 3:\n continue\n\n entry = tuple(data)\n\n if entry not in self.entries:\n self.entries.append(entry)\n\n sock.close()",
"def _resend_subscriptions_and_strategies(self):\n for req in self._ws_jsonrpc_cache:\n self._logger.info('Resending JSONRPCRequest %s', req)\n result = yield self._send(req)\n self._logger.info('Resent JSONRPCRequest, with result: %s', result)",
"async def async_update_programs_and_zones(\n hass: HomeAssistant, entry: ConfigEntry\n) -> None:\n data: RainMachineData = hass.data[DOMAIN][entry.entry_id]\n\n await asyncio.gather(\n *[\n data.coordinators[DATA_PROGRAMS].async_refresh(),\n data.coordinators[DATA_ZONES].async_refresh(),\n ]\n )",
"def poll(self):\n self.get_peers()\n self.get_trackers()\n self.get_files()",
"def watch_for_discovery_messages(self):\n while True:\n message = self.socket_manager.get_discovery_message()\n if message.disconnect == \"1\":\n self.handle_disconnect(message)\n elif message.direction == \"0\":\n self.respond_to_discovery_message(message)\n elif message.direction == \"1\":\n serialized_directory = message.get_payload()\n self.directory.merge_serialized_directory(serialized_directory)\n self.initiate_rtt_calculation()",
"def perTickActions(self, timeNow):\n if self.newRegList:\n facAddrL = [tpl[1] for tpl in self.patch.serviceLookup('RegistryUpdateQueue')\n if not self.patch.group.isLocal(tpl[1])]\n payload = self.newRegList[:]\n for fA in facAddrL:\n msg = RegistryGroupUpdateMsg(self.name + '_groupUpdateMsg',\n self.patch, payload, fA, debug=True)\n self.patch.launch(msg, timeNow)\n self.newRegList = []",
"async def reload_all(ctx):\n await ext_manager.reload_all()\n await ctx.send(\"Successfully reloaded.\")",
"def refresh_client(self):\n req = self.session.post(\n self._fmip_refresh_url,\n params=self.params,\n data=json.dumps(\n {\n \"clientContext\": {\n \"fmly\": self.with_family,\n \"shouldLocate\": True,\n \"selectedDevice\": \"all\",\n \"deviceListVersion\": 1,\n }\n }\n ),\n )\n self.response = req.json()\n\n for device_info in self.response[\"content\"]:\n device_id = device_info[\"id\"]\n if device_id not in self._devices:\n self._devices[device_id] = AppleDevice(\n device_info,\n self.session,\n self.params,\n manager=self,\n sound_url=self._fmip_sound_url,\n lost_url=self._fmip_lost_url,\n message_url=self._fmip_message_url,\n )\n else:\n self._devices[device_id].update(device_info)\n\n if not self._devices:\n raise PyiCloudNoDevicesException()",
"async def _async_refresh_device_detail_by_ids(self, device_ids_list):\n for device_id in device_ids_list:\n try:\n await self._async_refresh_device_detail_by_id(device_id)\n except asyncio.TimeoutError:\n _LOGGER.warning(\n \"Timed out calling august api during refresh of device: %s\",\n device_id,\n )\n except (ClientResponseError, CannotConnect) as err:\n _LOGGER.warning(\n \"Error from august api during refresh of device: %s\",\n device_id,\n exc_info=err,\n )",
"def reconnect(self):\n\t\t# TODO: Make sure the remote devices are actually found?\n\t\tself.setup()\n\t\tself.patch()",
"def keepalive(self):\n self.touch()\n apply_peers([self], name=self.name, namespace=self.namespace, legacy=self.legacy)",
"def _resend_subscriptions(self):\n for req in self._ws_jsonrpc_cache:\n if req.method == 'subscribe':\n self._logger.info('Resending JSONRPCRequest %s', req)\n result = yield self._send(req)\n self._logger.info(\n 'Resent JSONRPCRequest, with result: %s', result)",
"def _wait_for_peer_reboot(self, acceptable_states: Iterable[str], timeout: int = 3600) -> None:\n start = time.time()\n while time.time() - start < timeout:\n if self.peer_redundancy_state == \"failed\":\n log.error(\n \"Host %s: Redundancy state for device %s did not form properly to desired state: %s.\",\n self.host,\n self.host,\n self.peer_redundancy_state,\n )\n break\n\n while time.time() - start < timeout:\n if self.peer_redundancy_state in acceptable_states:\n return\n time.sleep(1)\n\n # TODO: Get proper hostname parameter\n log.error(\"Host %s: reboot timeout error with timeout %s.\", self.host, timeout)\n raise RebootTimeoutError(hostname=f\"{self.host}-peer\", wait_time=timeout)",
"async def async_refresh_devices(hass: HomeAssistant, tern):\n _LOGGER.info(\"refresh devices now\")\n response = await tern.get_entities(\"device\", True)\n devices = response[\"rsp\"][\"entities\"]\n pdata = tern.hass_platform_data\n\n device_registry = await dr.async_get_registry(hass)\n device_registry.async_get_or_create(\n config_entry_id=pdata.hub_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, pdata.mac)},\n identifiers={(DOMAIN, pdata.hub_entry.entry_id)},\n manufacturer=TERNCY_MANU_NAME,\n name=pdata.hub_entry.title,\n model=\"TERNCY-GW01\",\n sw_version=1,\n )\n\n for dev in devices:\n await update_or_create_entity(dev, tern)"
]
| [
"0.6113738",
"0.5776063",
"0.541313",
"0.5374841",
"0.5218789",
"0.5149429",
"0.50723237",
"0.49836946",
"0.49206266",
"0.48936272",
"0.4839092",
"0.48283538",
"0.4768601",
"0.4729838",
"0.4675083",
"0.46661487",
"0.46600488",
"0.46309865",
"0.46273312",
"0.46254832",
"0.46176413",
"0.46143538",
"0.46023816",
"0.45760933",
"0.45734474",
"0.4571005",
"0.45521206",
"0.45289212",
"0.45280656",
"0.45239225"
]
| 0.6712589 | 0 |
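The same three-way filter — session established, capability for the requested family present, RTC capability absent — in a self-contained form with stub objects; every name below is a placeholder rather than the project's real classes:

from dataclasses import dataclass, field
from typing import Iterable, List, Set

RF_RTC_UC = "rtc-uc"  # placeholder tag for the RTC address family


@dataclass
class StubPeer:
    ip: str
    in_established: bool
    capabilities: Set[str] = field(default_factory=set)
    refreshed: List[str] = field(default_factory=list)

    def request_route_refresh(self, family: str) -> None:
        self.refreshed.append(family)


def refresh_non_rtc_peers(peers: Iterable[StubPeer], route_family: str) -> None:
    assert route_family != RF_RTC_UC
    for peer in peers:
        if (peer.in_established
                and route_family in peer.capabilities
                and RF_RTC_UC not in peer.capabilities):
            peer.request_route_refresh(route_family)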
Request routerefresh for peer with `peer_ip` for given `route_families`. Will make routerefresh request for a given `route_family` only if such capability is supported and if peer is in ESTABLISHED state. Else, such requests are ignored. Raises appropriate error in other cases. If `peer_ip` is equal to 'all' makes refresh request to all valid peers. | def make_route_refresh_request(self, peer_ip, *route_families):
LOG.debug('Route refresh requested for peer %s and route families %s',
peer_ip, route_families)
if not SUPPORTED_GLOBAL_RF.intersection(route_families):
raise ValueError('Given route family(s) % is not supported.' %
route_families)
peer_list = []
# If route-refresh is requested for all peers.
if peer_ip == 'all':
peer_list.extend(self.get_peers_in_established())
else:
given_peer = self._peers.get(peer_ip)
if not given_peer:
raise ValueError('Invalid/unrecognized peer %s' % peer_ip)
if not given_peer.in_established:
raise ValueError('Peer currently do not have established'
' session.')
peer_list.append(given_peer)
# Make route refresh request to valid peers.
for peer in peer_list:
peer.request_route_refresh(*route_families)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_route_refresh(peer_ip):\n LOG.debug('Try to send route refresh to peer %s', peer_ip)\n json_request = flask.request.get_json()\n if 'afi' in json_request and 'safi' in json_request:\n if 'res' not in json_request:\n res = 0\n else:\n res = json_request['res']\n result = api_utils.send_route_refresh(\n peer_ip=peer_ip, afi=json_request['afi'], safi=json_request['safi'], res=res)\n return flask.jsonify(result)\n return flask.jsonify({\n 'status': False,\n 'code': 'please check your post data'\n })",
"def req_rr_to_non_rtc_peers(self, route_family):\n assert route_family != RF_RTC_UC\n for peer in self._peers.values():\n # First check if peer is in established state\n if (peer.in_established and\n # Check if peer has valid capability for given address\n # family\n peer.is_mbgp_cap_valid(route_family) and\n # Check if peer has valid capability for RTC\n not peer.is_mbgp_cap_valid(RF_RTC_UC)):\n peer.request_route_refresh(route_family)",
"def resend_sent(self, route_family, peer):\n if peer not in self._peers.values():\n raise ValueError('Could not find given peer (%s)' % peer)\n\n if route_family not in SUPPORTED_GLOBAL_RF:\n raise ValueError(\n 'Given route family (%s) is not supported.' % route_family\n )\n\n # Iterate over the global table for given afi, safi and enqueue\n # out-going routes.\n table = self._table_manager.get_global_table_by_route_family(\n route_family\n )\n\n for destination in table.values():\n # Check if this destination's sent - routes include this peer.\n # i.e. check if this destinations was advertised and enqueue\n # the path only if it was. If the current best-path has not been\n # advertised before, it might already have a OutgoingRoute queued\n # to be sent to the peer.\n sent_routes = destination.sent_routes\n if sent_routes is None or len(sent_routes) == 0:\n continue\n for sent_route in sent_routes:\n if sent_route.sent_peer == peer:\n # update med - if previously med was set per neighbor or\n # wasn't set at all now it could have changed and we may\n # need to set new value there\n p = sent_route.path\n if p.med_set_by_target_neighbor or p.get_pattr(\n BGP_ATTR_TYPE_MULTI_EXIT_DISC) is None:\n sent_route.path = \\\n clone_path_and_update_med_for_target_neighbor(\n sent_route.path, peer.med\n )\n\n ogr = OutgoingRoute(sent_route.path,\n for_route_refresh=True)\n peer.enque_outgoing_msg(ogr)",
"def reboot_standby(self, acceptable_states: Optional[Iterable[str]] = None, timeout: Optional[int] = None) -> None:\n if acceptable_states is None:\n acceptable_states = [self.peer_redundancy_state]\n kwargs = {\"acceptable_states\": acceptable_states}\n if timeout is not None:\n kwargs[\"timeout\"] = timeout\n self.show(\"failover reload-standby\")\n self._wait_for_peer_reboot(**kwargs)\n\n log.debug(\"Host %s: reboot standby with timeout %s.\", self.host, timeout)",
"def on_peer_down(self, peer):\n LOG.debug('Cleaning obsolete paths whose source/version: %s/%s',\n peer.ip_address, peer.version_num)\n # Launch clean-up for each global tables.\n self._table_manager.clean_stale_routes(peer)",
"def do_reconfigure(self, op, behavior_params=None):\n queryenv_answer = self.queryenv.list_farm_role_params(__node__['farm_role_id'])\n queryenv_behavior_params = queryenv_answer['params']\n if not behavior_params:\n behavior_params = queryenv_behavior_params\n\n behaviors = behavior_params.keys()\n for behavior in behaviors:\n if behavior not in behavior_apis:\n continue\n api = behavior_apis[behavior]()\n #TODO:\n reconfigure_argspecs = inspect.getargspec(api.reconfigure).args\n reconfigure_argspecs.remove('self')\n\n queryenv_reconf_params = queryenv_behavior_params.get(behavior, {})\n\n reconfigure_params = behavior_params.get(behavior, {})\n if reconfigure_params == None:\n reconfigure_params = queryenv_reconf_params\n reconfigure_params = dict((k, v)\n for k, v in reconfigure_params.items()\n if k in reconfigure_argspecs)\n\n if hasattr(api, 'init_service'):\n api.init_service()\n api.do_reconfigure(op, **reconfigure_params)",
"def _wait_for_peer_reboot(self, acceptable_states: Iterable[str], timeout: int = 3600) -> None:\n start = time.time()\n while time.time() - start < timeout:\n if self.peer_redundancy_state == \"failed\":\n log.error(\n \"Host %s: Redundancy state for device %s did not form properly to desired state: %s.\",\n self.host,\n self.host,\n self.peer_redundancy_state,\n )\n break\n\n while time.time() - start < timeout:\n if self.peer_redundancy_state in acceptable_states:\n return\n time.sleep(1)\n\n # TODO: Get proper hostname parameter\n log.error(\"Host %s: reboot timeout error with timeout %s.\", self.host, timeout)\n raise RebootTimeoutError(hostname=f\"{self.host}-peer\", wait_time=timeout)",
"def _del_bgp_speaker_fip_route(self, context, bgp_speaker_id, topic, cidr):\n\n bgp_speaker = self.nb_api.get(bgp.BGPSpeaker(id=bgp_speaker_id,\n topic=topic))\n current_routes = {str(r.destination): r\n for r in bgp_speaker.host_routes}\n if cidr not in current_routes:\n # Route has not been added, skip.\n return\n\n del current_routes[cidr]\n bgp_speaker.host_routes = current_routes.values()\n self.nb_api.update(bgp_speaker, skip_send_event=True)",
"async def _match_urgent_node_requests_to_peers(self) -> None:\n while self.manager.is_running:\n urgent_batch_id, urgent_hashes = await self._node_tasks.get(\n eth_constants.MAX_STATE_FETCH\n )\n\n # Get best peer, by GetNodeData speed\n queen = await self._queen_tracker.get_queen_peer()\n\n queen_is_requesting = queen.eth_api.get_node_data.is_requesting\n\n if queen_is_requesting:\n # Our best peer for node data has an in-flight GetNodeData request\n # Probably, backfill is asking this peer for data\n # This is right in the critical path, so we'd prefer this never happen\n self.logger.debug(\n \"Want to download urgent data, but %s is locked on other request\",\n queen,\n )\n # Don't do anything different, allow the request lock to handle the situation\n\n self._num_urgent_requests_by_peer[queen] += 1\n self._urgent_requests += 1\n\n await self._find_urgent_nodes(\n queen,\n urgent_hashes,\n urgent_batch_id,\n )",
"def _add_bgp_speaker_fip_route(self, context,\n bgp_speaker_id, topic, route):\n\n bgp_speaker = self.nb_api.get(bgp.BGPSpeaker(id=bgp_speaker_id,\n topic=topic))\n # Since all routable cidrs are in one address scope, they should be\n # unique in such context.\n current_routes = {str(r.destination): r\n for r in bgp_speaker.host_routes}\n cidr = route['destination']\n if (cidr in current_routes and\n route == current_routes[cidr].to_struct()):\n # Nothing changes, skip.\n return\n\n current_routes[cidr] = route\n bgp_speaker.host_routes = current_routes.values()\n self.nb_api.update(bgp_speaker, skip_send_event=True)",
"def apply_peers(\n peers: Iterable[Peer],\n name: str,\n namespace: Union[None, str],\n legacy: bool = False,\n):\n patch = {'status': {peer.id: None if peer.is_dead else peer.as_dict() for peer in peers}}\n resource = (LEGACY_PEERING_RESOURCE if legacy else\n CLUSTER_PEERING_RESOURCE if namespace is None else\n NAMESPACED_PEERING_RESOURCE)\n patching.patch_obj(resource=resource, namespace=namespace, name=name, patch=patch)",
"def test_bgp_neighbour_uninstall(self):\n self._common_uninstall_read_update(\n 'esg_id|ip|remoteAS|protocolIp|forwardingIp',\n dlr_bgp_neighbour.delete,\n {},\n read_args=['routingBGP'],\n read_kwargs={'uri_parameters': {'edgeId': 'esg_id'}},\n read_response={\n 'body': test_nsx_base.DLR_BGP_NEIGHBOUR_WITH_FILTER_AFTER,\n 'status': 204\n },\n update_args=['routingBGP'],\n update_kwargs={\n 'request_body_dict': test_nsx_base.DLR_BGP_NEIGHBOUR_BEFORE,\n 'uri_parameters': {'edgeId': 'esg_id'}\n }\n )",
"def refresh_firewall(self, device_ids=None):\n if not device_ids:\n device_ids = self.firewall.ports.keys()\n if not device_ids:\n LOG.info(_LI(\"No ports here to refresh firewall.\"))\n return\n LOG.info(_LI(\"Refresh firewall rules for %s ports.\"), len(device_ids))\n self._process_port_set(set(device_ids), True)",
"def check_referrers_consistency(self):\n logger.info(\"Checking for inconsistent referrers field\")\n assigned_fix_reqs = set()\n\n # Get all assigned fixture requests. Ensure that the instance has the fixture request in it's list of referrers\n # If instance does not have the referrer, add it.\n for fix_req in self.reqproc.requestdb.items(assigned=True):\n assigned_fix_reqs.add(fix_req.service_id)\n for fix_dict in fix_req.assignment.values():\n with lock_instance(fix_dict['id']):\n instance = self.get_fixture_instance(fix_dict['id'], verify_exists=False)\n if not instance:\n logger.error(\"Reservation held on a an instance which no longer exists\")\n continue\n if not instance.has_referrer(fix_req.service_id):\n logger.warning(\"%s was missing a referrer %s. Correcting state\", instance, fix_req.service_id)\n instance.add_referrer(fix_req.referrer())\n self._persist_instance_updates(instance)\n\n # Reverse correction from above.\n # Get all instances which have referrers, ensure the referrers corresponds to a valid fixture request (which we accumulated above)\n # If the referrer field corresponds to an fixture request which does not exist, remove the referrer.\n for fix_doc in self.instances.find({'referrers': {'$exists': True, '$not': {'$size': 0}}}):\n instance = FixtureInstance.deserialize_mongodoc(fix_doc)\n to_remove = []\n for referrer in instance.referrers:\n if referrer['service_id'] not in assigned_fix_reqs:\n logger.warning(\"%s has a referrer %s which no longer exists. Correcting state\", instance, referrer['service_id'])\n to_remove.append(referrer['service_id'])\n if to_remove:\n with lock_instance(instance.id):\n instance = self.get_fixture_instance(instance.id)\n for service_id in to_remove:\n instance.remove_referrer(service_id)\n self._persist_instance_updates(instance)",
"def test_bgp_neighbour_filter_uninstall(self):\n self._common_uninstall_read_update(\n 'net|esg_id|ip|remoteAS|protocolIp|forwardingIp',\n bgp_neighbour_filter.delete,\n {},\n # read\n read_args=['routingBGP'],\n read_kwargs={'uri_parameters': {'edgeId': 'esg_id'}},\n read_response={\n 'body': test_nsx_base.DLR_BGP_NEIGHBOUR_WITH_FILTER_AFTER,\n 'status': 204\n },\n # update\n update_args=['routingBGP'],\n update_kwargs={\n 'request_body_dict':\n test_nsx_base.DLR_BGP_NEIGHBOUR_WITH_FILTER_BEFORE,\n 'uri_parameters': {'edgeId': 'esg_id'}\n }\n )",
"def fusion_api_refresh_switch(self, uri, api=None, headers=None):\n return self.switch.refresh(uri, api, headers)",
"def comm_all_best_paths(self, peer):\n LOG.debug('Communicating current best path for all afi/safi except'\n ' 1/132')\n # We will enqueue best path from all global destination.\n for route_family, table in self._table_manager.iter:\n if route_family == RF_RTC_UC:\n continue\n if peer.is_mbgp_cap_valid(route_family):\n for dest in table.values():\n if dest.best_path:\n peer.communicate_path(dest.best_path)",
"def RoutingInterfaceNotificationRefresh(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()",
"def _collect_peers_of_interest(self, new_best_path):\n path_rts = new_best_path.get_rts()\n qualified_peers = set(self._peers.values())\n\n # Filter out peers based on RTC_AS setting if path is for RT_NLRI\n qualified_peers = self._rt_manager.filter_by_origin_as(\n new_best_path, qualified_peers\n )\n\n # We continue to filter out qualified peer based on path RTs\n # If new best path has RTs, we need to share this UPDATE with\n # qualifying peers\n if path_rts:\n # We add Default_RTC_NLRI to path RTs so that we can send it to\n # peers that have expressed interest in all paths\n path_rts.append(RouteTargetMembershipNLRI.DEFAULT_RT)\n # All peers that do not have RTC capability qualify\n qualified_peers = set(self._get_non_rtc_peers())\n # Peers that have RTC capability and have common RT with the path\n # also qualify\n peer_to_rtfilter_map = self._peer_to_rtfilter_map\n for peer, rt_filter in peer_to_rtfilter_map.items():\n # Ignore Network Controller (its not a BGP peer)\n if peer is None:\n continue\n\n if rt_filter is None:\n qualified_peers.add(peer)\n elif rt_filter.intersection(path_rts):\n qualified_peers.add(peer)\n\n return qualified_peers",
"def refresh_fqdn_cache(force=False):\n if not isinstance(force, bool):\n raise CommandExecutionError(\"Force option must be boolean.\")\n\n if force:\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><system><fqdn><refresh><force>yes</force></refresh></fqdn></system></request>\",\n }\n else:\n query = {\n \"type\": \"op\",\n \"cmd\": (\n \"<request><system><fqdn><refresh></refresh></fqdn></system></request>\"\n ),\n }\n\n return __proxy__[\"panos.call\"](query)",
"def test_fib_route(ip=None, vr=\"vr1\"):\n\n xpath = \"<test><routing><fib-lookup>\"\n\n if ip:\n xpath += \"<ip>{}</ip>\".format(ip)\n\n if vr:\n xpath += \"<virtual-router>{}</virtual-router>\".format(vr)\n\n xpath += \"</fib-lookup></routing></test>\"\n\n query = {\"type\": \"op\", \"cmd\": xpath}\n\n return __proxy__[\"panos.call\"](query)",
"def mutate(self, input_route: List[int], mutation_probability: float = 0.2) -> List[int]:\n route = input_route.copy()\n for k in range(len(route)):\n if random.random() < mutation_probability:\n self._swap(route)\n\n # Make sure that at least one change is made to input route\n if route == input_route:\n self._swap(route)\n\n return route",
"def get_hosts_fanout_retry(self, target, listener_type):",
"def RoutingInterfaceNotificationRefresh(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def find_hot_routes(fname=FNAME, nRoutes=NROUTES, nRides=NRIDES, keepBest=5,\n randSeed=SEED, oname=ONAME):\n df = load_data(fname)\n bestyet = {}\n\n for route in routes(df, nRoutes):\n sampleRides = sample_rides(df, nRides)\n fa = fAvg(route, sampleRides)\n bestyet = best_yet(bestyet, fa, route, keepBest)\n\n return bestyet",
"def resolve_routes(\n routes: List[RouteMetadata],\n token_network_address: TokenNetworkAddress,\n chain_state: ChainState,\n) -> List[RouteState]:\n\n resolvable = []\n for route_metadata in routes:\n if len(route_metadata.route) < 2:\n continue\n\n channel_state = views.get_channelstate_by_token_network_and_partner(\n chain_state=chain_state,\n token_network_address=token_network_address,\n partner_address=route_metadata.route[1],\n )\n\n if channel_state is not None:\n resolvable.append(\n RouteState(\n route=route_metadata.route,\n # This is only used in the mediator, so fees are set to 0\n estimated_fee=FeeAmount(0),\n )\n )\n return resolvable",
"def _catch_all_reshard(fsdp_module: FullyShardedDataParallel) -> None:\n # Note that we wrap resharding logic in a try-catch as a defensive\n # approach, as if an error is thrown, we are in the backwards pass,\n # and autograd would not print out much useful info about the actual\n # error hit.\n try:\n free_unsharded_flat_params: List[bool] = []\n handles_to_reshard: List[FlatParamHandle] = []\n for handle in fsdp_module._handles:\n # TODO: This already-resharded check is brittle:\n # https://github.com/pytorch/pytorch/issues/83956\n already_resharded = (\n handle.flat_param.data_ptr() == handle.flat_param._local_shard.data_ptr()\n )\n if already_resharded:\n continue\n free_unsharded_flat_params.append(self._should_free_unsharded_flat_param(handle))\n handles_to_reshard.append(handle)\n self._reshard(handles_to_reshard, free_unsharded_flat_params)\n except Exception as e:\n p_assert(\n False,\n f\"Got exception while resharding module {fsdp_module}: {str(e)}\",\n raise_assertion_error=False\n )\n raise e",
"def spoof(target_ip, spoof_ip):\n arp_answer = scapy.ARP(\n op=2,\n pdst=target_ip,\n hwdst=get_mac(target_ip),\n psrc=spoof_ip\n )\n scapy.send(arp_answer, verbose=False)",
"def _update_bgp_speaker_routes(self, context, bgp_speaker_id, topic):\n\n prefixes = self._get_tenant_network_routes_by_bgp_speaker(\n context, bgp_speaker_id)\n # Translate to the format of dragonflow db data.\n prefix_routes = [{'destination': x['destination'],\n 'nexthop': x['next_hop']} for x in prefixes]\n\n host_routes = []\n for _net_id, host, addr in self._get_fip_query(\n context, bgp_speaker_id).all():\n external_ip = self._get_external_ip_by_host(host)\n if not external_ip:\n continue\n\n host_routes.append({'destination': addr + '/32',\n 'nexthop': external_ip})\n\n lean_bgp_speaker = bgp.BGPSpeaker(id=bgp_speaker_id,\n topic=topic,\n prefix_routes=prefix_routes,\n host_routes=host_routes)\n self.nb_api.update(lean_bgp_speaker, skip_send_event=True)",
"async def peers_keepalive(\n *,\n ourselves: Peer,\n):\n try:\n while True:\n logger.debug(f\"Peering keep-alive update for {ourselves.id} (priority {ourselves.priority})\")\n ourselves.keepalive()\n\n # How often do we update. Keep limited to avoid k8s api flooding.\n # Should be slightly less than the lifetime, enough for a patch request to finish.\n await asyncio.sleep(max(1, int(ourselves.lifetime.total_seconds() - 10)))\n finally:\n try:\n ourselves.disappear()\n except:\n pass"
]
| [
"0.6576246",
"0.651089",
"0.51476836",
"0.44330993",
"0.43338594",
"0.4328826",
"0.4319273",
"0.43107876",
"0.42995644",
"0.42772868",
"0.42183658",
"0.42091832",
"0.42060584",
"0.41908076",
"0.41816354",
"0.41066763",
"0.409145",
"0.40811968",
"0.40530893",
"0.39563677",
"0.39323723",
"0.38340518",
"0.3833706",
"0.38327008",
"0.38267127",
"0.38151926",
"0.38032967",
"0.3791188",
"0.37908602",
"0.37883526"
]
| 0.8223286 | 0 |
Shares/communicates current best rt_nlri paths with this peer. Can be used to send initial updates after we have established a session with `peer` with which the RTC capability is valid. Takes into account the peer's RTC_AS setting and filters out all RT NLRIs whose origin AS does not match this setting. | def comm_all_rt_nlris(self, peer):
# First check if for this peer mpbgp-rtc is valid.
if not peer.is_mbgp_cap_valid(RF_RTC_UC):
return
neigh_conf = self._neighbors_conf.get_neighbor_conf(peer.ip_address)
peer_rtc_as = neigh_conf.rtc_as
        # Iterate over all RT_NLRI destinations and communicate the qualifying RT_NLRIs
rtc_table = self._table_manager.get_rtc_table()
for dest in rtc_table.values():
best_path = dest.best_path
            # Ignore a destination that currently does not have a best path
if not best_path:
continue
# If this is a local path
if best_path.source is None:
# Check RT NLRI's origin AS matches peer RTC_AS setting
origin_as = best_path.nlri.origin_as
if origin_as == peer_rtc_as:
peer.communicate_path(best_path)
else:
# Communicate all remote RT NLRIs
peer.communicate_path(best_path)
# Also communicate EOR as per RFC
peer.enque_end_of_rib(RF_RTC_UC) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _collect_peers_of_interest(self, new_best_path):\n path_rts = new_best_path.get_rts()\n qualified_peers = set(self._peers.values())\n\n # Filter out peers based on RTC_AS setting if path is for RT_NLRI\n qualified_peers = self._rt_manager.filter_by_origin_as(\n new_best_path, qualified_peers\n )\n\n # We continue to filter out qualified peer based on path RTs\n # If new best path has RTs, we need to share this UPDATE with\n # qualifying peers\n if path_rts:\n # We add Default_RTC_NLRI to path RTs so that we can send it to\n # peers that have expressed interest in all paths\n path_rts.append(RouteTargetMembershipNLRI.DEFAULT_RT)\n # All peers that do not have RTC capability qualify\n qualified_peers = set(self._get_non_rtc_peers())\n # Peers that have RTC capability and have common RT with the path\n # also qualify\n peer_to_rtfilter_map = self._peer_to_rtfilter_map\n for peer, rt_filter in peer_to_rtfilter_map.items():\n # Ignore Network Controller (its not a BGP peer)\n if peer is None:\n continue\n\n if rt_filter is None:\n qualified_peers.add(peer)\n elif rt_filter.intersection(path_rts):\n qualified_peers.add(peer)\n\n return qualified_peers",
"def comm_all_best_paths(self, peer):\n LOG.debug('Communicating current best path for all afi/safi except'\n ' 1/132')\n # We will enqueue best path from all global destination.\n for route_family, table in self._table_manager.iter:\n if route_family == RF_RTC_UC:\n continue\n if peer.is_mbgp_cap_valid(route_family):\n for dest in table.values():\n if dest.best_path:\n peer.communicate_path(dest.best_path)",
"def req_rr_to_non_rtc_peers(self, route_family):\n assert route_family != RF_RTC_UC\n for peer in self._peers.values():\n # First check if peer is in established state\n if (peer.in_established and\n # Check if peer has valid capability for given address\n # family\n peer.is_mbgp_cap_valid(route_family) and\n # Check if peer has valid capability for RTC\n not peer.is_mbgp_cap_valid(RF_RTC_UC)):\n peer.request_route_refresh(route_family)",
"def clone_rtcpath_update_rt_as(path, new_rt_as):\n assert path and new_rt_as\n if not path or path.route_family != RF_RTC_UC:\n raise ValueError('Expected RT_NLRI path')\n old_nlri = path.nlri\n new_rt_nlri = RouteTargetMembershipNLRI(new_rt_as, old_nlri.route_target)\n return RtcPath(path.source, new_rt_nlri, path.source_version_num,\n pattrs=path.pathattr_map, nexthop=path.nexthop,\n is_withdraw=path.is_withdraw)",
"def unison_sync(paths_to_sync):\n log('Synchronizing CA (%s) to all peers.' % (', '.join(paths_to_sync)),\n level=INFO)\n keystone_gid = grp.getgrnam('keystone').gr_gid\n\n # NOTE(dosaboy): This will sync to all peers who have already provided\n # their ssh keys. If any existing peers have not provided their keys yet,\n # they will be silently ignored.\n unison.sync_to_peers(peer_interface='cluster', paths=paths_to_sync,\n user=SSH_USER, verbose=True, gid=keystone_gid,\n fatal=True)\n\n synced_units = peer_units()\n if len(unison.collect_authed_hosts('cluster')) != len(synced_units):\n log(\"Not all peer units synced due to missing public keys\", level=INFO)\n return None\n else:\n return synced_units",
"def resend_sent(self, route_family, peer):\n if peer not in self._peers.values():\n raise ValueError('Could not find given peer (%s)' % peer)\n\n if route_family not in SUPPORTED_GLOBAL_RF:\n raise ValueError(\n 'Given route family (%s) is not supported.' % route_family\n )\n\n # Iterate over the global table for given afi, safi and enqueue\n # out-going routes.\n table = self._table_manager.get_global_table_by_route_family(\n route_family\n )\n\n for destination in table.values():\n # Check if this destination's sent - routes include this peer.\n # i.e. check if this destinations was advertised and enqueue\n # the path only if it was. If the current best-path has not been\n # advertised before, it might already have a OutgoingRoute queued\n # to be sent to the peer.\n sent_routes = destination.sent_routes\n if sent_routes is None or len(sent_routes) == 0:\n continue\n for sent_route in sent_routes:\n if sent_route.sent_peer == peer:\n # update med - if previously med was set per neighbor or\n # wasn't set at all now it could have changed and we may\n # need to set new value there\n p = sent_route.path\n if p.med_set_by_target_neighbor or p.get_pattr(\n BGP_ATTR_TYPE_MULTI_EXIT_DISC) is None:\n sent_route.path = \\\n clone_path_and_update_med_for_target_neighbor(\n sent_route.path, peer.med\n )\n\n ogr = OutgoingRoute(sent_route.path,\n for_route_refresh=True)\n peer.enque_outgoing_msg(ogr)",
"def frt_stay_connected_low(self, params=None):\n if self.inv is None:\n raise der.DERError('DER not initialized')\n\n try:\n if params is not None:\n ena = params.get('Ena')\n if ena is not None:\n if ena is True:\n self.inv.lfrtc.ModEna = 1\n else:\n self.inv.lfrtc.ModEna = 0\n act_crv = params.get('ActCrv')\n if act_crv is not None:\n self.inv.lfrtc.ActCrv = act_crv\n win_tms = params.get('WinTms')\n if win_tms is not None:\n self.inv.lfrtc.WinTms = win_tms\n rmp_tms = params.get('RmpTms')\n if rmp_tms is not None:\n self.inv.lfrtc.RmpTms = rmp_tms\n rvrt_tms = params.get('RvrtTms')\n if rvrt_tms is not None:\n self.inv.lfrtc.RvrtTms = rvrt_tms\n for i in xrange(1, params['NPt'] + 1): # Uses the SunSpec indexing rules (start at 1)\n time_point = 'Tms%d' % i\n param_time_point = params.get(time_point)\n if param_time_point is not None:\n setattr(self.inv.hfrtc.l_curve[h_curve_num], time_point, param_time_point)\n freq_point = 'F%d' % i\n param_freq_point = params.get(freq_point)\n if param_freq_point is not None:\n setattr(self.inv.hfrtc.l_curve[h_curve_num], freq_point, param_freq_point)\n self.inv.hfrtc.write()\n else:\n params = {}\n self.inv.lfrtc.read()\n if self.inv.lfrtc.ModEna == 0:\n params['Ena'] = False\n else:\n params['Ena'] = True\n params['ActCrv'] = self.inv.lfrtc.ActCrv\n params['NCrv'] = self.inv.lfrtc.NCrv\n params['NPt'] = self.inv.lfrtc.NPt\n params['WinTms'] = self.inv.lfrtc.WinTms\n params['RmpTms'] = self.inv.lfrtc.RmpTms\n params['RvrtTms'] = self.inv.lfrtc.RvrtTms\n\n except Exception, e:\n raise der.DERError(str(e))\n\n return params",
"def ResortPeers(self):\n \n self.sortedPeerList = []\n append = self.sortedPeerList.append\n for i in self.peerDatabase.keys():\n append((self.peerDatabase[i].RemainingRemoteStorage(), i))\n self.sortedPeerList.sort()\n self.sortedPeerList.reverse()",
"def comm_new_best_to_bgp_peers(self, new_best_path):\n # Filter based on standard community\n # If new best path has community attribute, it should be taken into\n # account when sending UPDATE to peers.\n comm_attr = new_best_path.get_pattr(BGP_ATTR_TYPE_COMMUNITIES)\n if comm_attr:\n comm_attr_na = comm_attr.has_comm_attr(\n BGPPathAttributeCommunities.NO_ADVERTISE\n )\n # If we have NO_ADVERTISE attribute is present, we do not send\n # UPDATE to any peers\n if comm_attr_na:\n LOG.debug('New best path has community attr. NO_ADVERTISE = %s'\n '. Hence not advertising to any peer', comm_attr_na)\n return\n\n qualified_peers = self._collect_peers_of_interest(\n new_best_path\n )\n\n # Distribute new best-path to qualified peers.\n for peer in qualified_peers:\n peer.communicate_path(new_best_path)",
"def resolve(self):\n # Initialize the winner chain with the local chain\n winner_chain = self.chain\n replace = False\n for node in self.__peer_nodes:\n url = 'http://{}/chain'.format(node)\n try:\n # Send a request and store the response\n response = requests.get(url)\n # Retrieve the JSON data as a dictionary\n node_chain = response.json()\n # Convert the dictionary list to a list of block AND transaction objects\n node_chain = [Block(block['index'], block['previous_hash'], [Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']],\n [Chipsaction(tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount']) for tx in block['chipsactions']],\n [Messsaction(tx['sender'], tx['follower'], tx['message'], tx['signature']) for tx in block['messsactions']],\n block['proof'], block['timestamp']) for block in node_chain]\n node_chain_length = len(node_chain)\n local_chain_length = len(winner_chain)\n # Store the received chain as the current winner chain if it's longer AND valid\n if node_chain_length > local_chain_length and Verification.verify_chain(node_chain):\n winner_chain = node_chain\n replace = True\n except requests.exceptions.ConnectionError:\n continue\n self.resolve_conflicts = False\n # Replace the local chain with the winner chain\n self.chain = winner_chain\n if replace:\n self.__open_transactions = []\n self.__open_chipsactions = []\n self.__open_messsactions = []\n self.save_data()\n return replace",
"def filter_relationships(self, recRelation, routes, src, is_forward=False, is_update=False):\n dict_entry = \"src\" if is_update else \"peer\"\n outroutes = []\n if recRelation == CUST:\n if is_forward:\n return routes\n\n for val in routes:\n if val[dict_entry] != src: \n outroutes.append(val) \n\n return outroutes\n for val in routes: \n ip = val[dict_entry]\n relation = self.relations[ip] \n if relation == CUST: \n outroutes.append(val)\n \n return outroutes",
"def processTradeRoutes(self):\n try:\n nextRound = self.currentRound+1\n resultslist = []\n for trID in self.tradeRoutes.keys():\n myTradeRoute = self.tradeRoutes[trID]\n (systemFromID, systemToID, tradeRouteType) = string.split(trID, '-')\n systemFrom = self.systems[systemFromID]\n systemTo = self.systems[systemToID]\n cancel = 0\n warpReq = 0\n # choose trade route type\n if tradeRouteType == 'GEN':\n # update what system sends based on what it makes\n myTradeRoute.AL = systemFrom.prodAL\n myTradeRoute.EC = systemFrom.prodEC\n myTradeRoute.IA = systemFrom.prodIA\n \n # check if trade route is adjacent or requires warp gate capacity\n if systemTo.id in systemFrom.warpGateSystems:\n warpReq = myTradeRoute.getWarpRequired()\n if warpReq > (systemFrom.availWGC-systemFrom.usedWGC) or warpReq > (systemTo.availWGC-systemTo.usedWGC):\n cancel = 1\n elif systemTo.id not in systemFrom.connectedSystems:\n cancel = 1\n \n if (systemFrom.AL >= myTradeRoute.AL and\n systemFrom.EC >= myTradeRoute.EC and\n systemFrom.IA >= myTradeRoute.IA and \n cancel == 0):\n # process trade route\n systemFrom.AL -= myTradeRoute.AL\n systemFrom.EC -= myTradeRoute.EC\n systemFrom.IA -= myTradeRoute.IA\n systemTo.AL += myTradeRoute.AL\n systemTo.EC += myTradeRoute.EC\n systemTo.IA += myTradeRoute.IA\n # deduct properly if empires are different\n empireFrom = self.empires[systemFrom.myEmpireID]\n empireTo = self.empires[systemTo.myEmpireID]\n if empireFrom <> empireTo:\n empireFrom.AL -= myTradeRoute.AL\n empireFrom.EC -= myTradeRoute.EC\n empireFrom.IA -= myTradeRoute.IA\n empireTo.AL += myTradeRoute.AL\n empireTo.EC += myTradeRoute.EC\n empireTo.IA += myTradeRoute.IA\n \n if warpReq > 0:\n systemFrom.usedWGC += warpReq\n systemTo.usedWGC += warpReq\n \n # mail trade route completion\n resultslist.append('Trade from System:%s to System:%s complete' % (systemFrom.id, systemTo.id))\n self.mailTradeInfo('completed', myTradeRoute, nextRound)\n else:\n cancel = 1\n \n # check if route should be cancelled\n if cancel == 1:\n resultslist.append('cancel trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n elif myTradeRoute.oneTime == 1:\n resultslist.append('one time trade route=%s' % myTradeRoute.id)\n self.cancelTradeRoute(myTradeRoute.id, nextRound)\n \n return str(resultslist)\n except:\n return 'galaxy->processTradeRoutes error'",
"def rrt_search(self):\n self.tree.AddVertex(self.start_config)\n self.tree.AddEdge(self.start_config, self.start_config)\n\n while True:\n x_new, x_nearest = self.new_and_near()\n if x_new is None:\n # print(\"it's None\")\n continue\n # connect shortest valid edge\n # print(\"new point\", x_new)\n self.connect_to_point(x_nearest, x_new)\n\n # probabilistically check if solution found\n if self.goal_config in self.tree.vertices:\n print(\"find it\")\n path = self.planning_env.reconstruct_path(self.tree.edges, self.start_config, self.goal_config)\n if path is not None:\n return path\n\n if self.name=='rrtstar' and self.tree.samples_taken > 10:\n return []\n # # check if can connect to goal after generating max_samples\n if self.tree.samples_taken >= self.tree.max_samples:\n return []",
"def test_combine_peer_stats(self):\n tracts = Geo.objects.filter(geo_type=Geo.TRACT_TYPE, cbsa=request.GET.get('metro'))\n metro = Geo.objects.get(geo_type=Geo.METRO_TYPE, geoid=request.GET.get('metro'))\n lender = Institution.objects.get(institution_id=request.GET.get('lender'))\n peers = lender.get_peer_list(metro, None, None)\n peer_data_collector = []\n for peer in peers:\n peer_request = HttpRequest()\n peer_request.GET['lender'] = peer.institution.institution_id\n peer_request.GET['metro']= metro.geoid\n peer_lar_data = loan_originations_as_json(peer_request)\n peer_data_collector.append(assemble_stats(peer_lar_data, tracts))\n peer_stats = combine_peer_stats(peer_data_collector)\n self.assertEqual(peer_stats['hma_pct'], 0.0)\n self.assertEqual(peer_stats['lma_pct'], 1.0)\n self.assertEqual(peer_stats['mma_pct'], 0.0)\n self.assertEqual(peer_stats['lma'], 7)\n self.assertEqual(peer_stats['mma'], 0)\n self.assertEqual(peer_stats['hma'], 0)\n self.assertEqual(peer_stats['lar_total'], 7)",
"def __request_virdir(self):\n for pn in self.peernames:\n with socket.socket() as tmpsock:\n tmpsock.connect(tuple(pn))\n\n # Solicitud\n tmpsock.send(message.REQDIRMSG)\n header = tmpsock.recv(5)\n\n if header[0] != message.GIVEDIR:\n raise ValueError(\n \"[SHARE] error al solicitar archivos a\", pn)\n\n # Primero se le piden sus archivos\n bodysize = int.from_bytes(header[1:5], byteorder=\"big\")\n body = tmpsock.recv(bodysize)\n port, sharelist = message.parse_file_bytes(body, bodysize)\n self.__add_sharefiles(sharelist, pn)\n\n # Despues le comparte los suyos\n tmpsock.send(message.build_givedir_message(\n self.port, self.files))",
"def test_remote_asns_non_spine(bf: Session, sot: SoT):\n peer_props = bf.q.bgpPeerConfiguration(nodes=f\"{SNAPSHOT_NODES_SPEC} \\\\ /spine.*/\").answer().frame()\n for _, row in peer_props.iterrows():\n node_name = row[\"Node\"]\n assert row[\"Peer_Group\"], \"Peer group is not set for neighbor {} on {}\".format(row[\"Remote_IP\"], node_name)\n peer_group = row[\"Peer_Group\"].lower()\n if peer_group.startswith(\"isp\"):\n continue\n asn_range = sot.get_peer_group_asn_range(peer_group)\n assert int(row[\"Remote_AS\"]) in asn_range, \\\n \"Remote AS of neighbor {} ({}) in peer group '{}' on {} is outside of the expected range {}\".format(\n row[\"Remote_IP\"], row[\"Remote_AS\"], peer_group, node_name, asn_range)",
"def link_routing_slips(cls):\n routing_slips = cls._get_routing_slip_by_status(RoutingSlipStatus.LINKED.value)\n for routing_slip in routing_slips:\n # 1. Reverse the child routing slip.\n # 2. Create receipt to the parent.\n # 3. Change the payment account of child to parent.\n # 4. Change the status.\n try:\n current_app.logger.debug(f'Linking Routing Slip: {routing_slip.number}')\n payment_account: PaymentAccountModel = PaymentAccountModel.find_by_id(\n routing_slip.payment_account_id)\n cfs_account: CfsAccountModel = CfsAccountModel.find_effective_by_account_id(\n payment_account.id)\n\n # reverse routing slip receipt\n if CFSService.get_receipt(cfs_account, routing_slip.number).get('status') != CfsReceiptStatus.REV.value:\n CFSService.reverse_rs_receipt_in_cfs(cfs_account, routing_slip.number, ReverseOperation.LINK.value)\n cfs_account.status = CfsAccountStatus.INACTIVE.value\n\n # apply receipt to parent cfs account\n parent_rs: RoutingSlipModel = RoutingSlipModel.find_by_number(routing_slip.parent_number)\n parent_payment_account: PaymentAccountModel = PaymentAccountModel.find_by_id(\n parent_rs.payment_account_id)\n parent_cfs_account: CfsAccountModel = CfsAccountModel.find_effective_by_account_id(\n parent_payment_account.id)\n # For linked routing slip receipts, append 'L' to the number to avoid duplicate error\n receipt_number = routing_slip.generate_cas_receipt_number()\n CFSService.create_cfs_receipt(cfs_account=parent_cfs_account,\n rcpt_number=receipt_number,\n rcpt_date=routing_slip.routing_slip_date.strftime('%Y-%m-%d'),\n amount=routing_slip.total,\n payment_method=parent_payment_account.payment_method,\n access_token=CFSService.get_fas_token().json().get('access_token'))\n\n # Add to the list if parent is NSF, to apply the receipts.\n if parent_rs.status == RoutingSlipStatus.NSF.value:\n total_invoice_amount = cls._apply_routing_slips_to_pending_invoices(parent_rs)\n current_app.logger.debug(f'Total Invoice Amount : {total_invoice_amount}')\n # Update the parent routing slip status to ACTIVE\n parent_rs.status = RoutingSlipStatus.ACTIVE.value\n # linking routing slip balance is transferred ,so use the total\n parent_rs.remaining_amount = routing_slip.total - total_invoice_amount\n\n routing_slip.save()\n\n except Exception as e: # NOQA # pylint: disable=broad-except\n capture_message(\n f'Error on Linking Routing Slip number:={routing_slip.number}, '\n f'routing slip : {routing_slip.id}, ERROR : {str(e)}', level='error')\n current_app.logger.error(e)\n continue",
"def frt_stay_connected_high(self, params=None):\n if self.inv is None:\n raise der.DERError('DER not initialized')\n\n try:\n if params is not None:\n ena = params.get('Ena')\n if ena is not None:\n if ena is True:\n self.inv.hfrtc.ModEna = 1\n else:\n self.inv.hfrtc.ModEna = 0\n act_crv = params.get('ActCrv')\n if act_crv is not None:\n self.inv.hfrtc.ActCrv = act_crv\n win_tms = params.get('WinTms')\n if win_tms is not None:\n self.inv.hfrtc.WinTms = win_tms\n rmp_tms = params.get('RmpTms')\n if rmp_tms is not None:\n self.inv.hfrtc.RmpTms = rmp_tms\n rvrt_tms = params.get('RvrtTms')\n if rvrt_tms is not None:\n self.inv.hfrtc.RvrtTms = rvrt_tms\n for i in xrange(1, params['NPt'] + 1): # Uses the SunSpec indexing rules (start at 1)\n time_point = 'Tms%d' % i\n param_time_point = params.get(time_point)\n if param_time_point is not None:\n setattr(self.inv.hfrtc.l_curve[h_curve_num], time_point, param_time_point)\n freq_point = 'F%d' % i\n param_freq_point = params.get(freq_point)\n if param_freq_point is not None:\n setattr(self.inv.hfrtc.l_curve[h_curve_num], freq_point, param_freq_point)\n self.inv.hfrtc.write()\n else:\n params = {}\n self.inv.hfrtc.read()\n if self.inv.hfrtc.ModEna == 0:\n params['Ena'] = False\n else:\n params['Ena'] = True\n params['ActCrv'] = self.inv.hfrtc.ActCrv\n params['NCrv'] = self.inv.hfrtc.NCrv\n params['NPt'] = self.inv.hfrtc.NPt\n params['WinTms'] = self.inv.hfrtc.WinTms\n params['RmpTms'] = self.inv.hfrtc.RmpTms\n params['RvrtTms'] = self.inv.hfrtc.RvrtTms\n\n except Exception, e:\n raise der.DERError(str(e))\n\n return params",
"def get_candidates(self, plugin, context, sync_router, subnet_id):\n with context.session.begin(subtransactions=True):\n # allow one router is hosted by just\n # one enabled l3 agent hosting since active is just a\n # timing problem. Non-active l3 agent can return to\n # active any time\n l3_agents = plugin.get_l3_agents_hosting_routers(\n context, [sync_router['id']], admin_state_up=True)\n if l3_agents and not sync_router.get('distributed', False):\n LOG.debug(_('Router %(router_id)s has already been hosted'\n ' by L3 agent %(agent_id)s'),\n {'router_id': sync_router['id'],\n 'agent_id': l3_agents[0]['id']})\n return\n\n active_l3_agents = plugin.get_l3_agents(context, active=True)\n if not active_l3_agents:\n LOG.warn(_('No active L3 agents'))\n return\n new_l3agents = plugin.get_l3_agent_candidates(context,\n sync_router,\n active_l3_agents,\n subnet_id)\n old_l3agentset = set(l3_agents)\n if sync_router.get('distributed', False):\n new_l3agentset = set(new_l3agents)\n candidates = list(new_l3agentset - old_l3agentset)\n else:\n candidates = new_l3agents\n if not candidates:\n LOG.warn(_('No L3 agents can host the router %s'),\n sync_router['id'])\n return\n\n return candidates",
"def analyseRemainderMulticastOPT(self):\n #create a list of criteria that correspond to maximal path length\n #max_path_length = max(self.pathLengths)\n\n #criterion_max_path_length = []\n #origins_max_path_length = []\n #for c in range(len(self.pathLengths)):\n # if self.pathLengths[c] == max_path_length:\n # criterion_max_path_length.append(self.globalMin[c])\n # origins_max_path_length.append(self.origins[c])\n\n #min_criterion = min(criterion_max_path_length)\n\n #find index\n #for m in range(len(criterion_max_path_length)):\n # if criterion_max_path_length[m] == min_criterion:\n # break\n\n #for s in range(len(self.origins)):\n # if self.origins[s] == origins_max_path_length[m]:\n # break\n\n min_criterion = self.globalMin[0]\n self.overall_min = min_criterion\n self.overall_max_path_length = len(self.min_path[0])\n\n if self.chosenScheduleIndex != self.globalMinSchedIdx[0]:\n self.chosenScheduleIndex = self.globalMinSchedIdx[0]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n\n # inform all neighbors about origin that has local minimal criterion\n for n in range(len(self.Neighbors)):\n #structure: ['minimalorigin', ID_minimal_origin, minimal_criterion_value]\n #self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(origins_max_path_length[m]), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[s]), copy.deepcopy(self.min_path_schedules[s])])\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(self.CommID), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[0]), copy.deepcopy(self.min_path_schedules[0])])\n\n if self.OPTcriterion == 'maxmindiff':\n fluct_criterion = max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n elif self.OPTcriterion == 'absremainder':\n fluct_criterion = 0\n for a in range(len(self.EFluctuationCurve)):\n fluct_criterion += abs(self.EFluctuationCurve[a])\n\n\n #print 'ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[s]), 100 - 100*(float((float(min_criterion))/float(fluct_max_min_diff))), origins_max_path_length[m], self.min_path_schedules[s] )\n self.log_message('ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[0]), 100 - 100*(float((float(min_criterion))/float(fluct_criterion))), self.CommID, self.min_path_schedules[0] ))",
"def uploads(self, requests, peers, history):\n \n\n round = history.current_round()\n # logging.debug(\"%s again. It's round %d.\" % (\n # self.id, round))\n\n # if no requests are made, then agent does not create any uploads\n if len(requests) == 0:\n return []\n \n # number of rounds to track in history to determine unchoke slots\n num_rounds_backtracking = 2\n num_unchoke_slots = int(math.sqrt(self.up_bw))\n\n # set of peers who get an unchoke slot\n unchoked_peers = set()\n\n # determine the list of peers who are requesting pieces from Agent\n requesting_peers = []\n for request in requests:\n if request.requester_id not in requesting_peers:\n requesting_peers.append(request.requester_id)\n\n \n # if round is less than 2 just randomly allocate unchoke slots, otherwise determine by highest download rate\n if (round < 2):\n chosen_peers = []\n if len(requesting_peers) >= num_unchoke_slots:\n chosen_peers = random.sample(requesting_peers,num_unchoke_slots)\n else:\n chosen_peers = requesting_peers\n for chosen_p in chosen_peers:\n unchoked_peers.add(chosen_p)\n\n else:\n # {peer: download_rate, .....}\n peer_by_download_rate_map = findPeerByDownloadRateInLastNRounds(\n num_rounds_backtracking, self, requesting_peers, history)\n\n # [(peer_id, download rate), ...] in descending order\n sorted_peer_by_download_rate = sorted(peer_by_download_rate_map.items(), key=lambda x:x[1], reverse=True)\n\n # find top 3 peers and their download rate\n for peer_id, download_rate in sorted_peer_by_download_rate[:num_unchoke_slots]:\n unchoked_peers.add(peer_id)\n\n # every 4th round, optimistically unchoke a peer that is not one of the top 3 peers\n if (round > 0 and round % 3 == 0 and len(requesting_peers) > len(unchoked_peers)):\n self.optimistically_unchoked_peer = random.choice(requesting_peers)\n while (self.optimistically_unchoked_peer in unchoked_peers):\n self.optimistically_unchoked_peer = random.choice(requesting_peers)\n unchoked_peers.add(self.optimistically_unchoked_peer) \n elif (self.optimistically_unchoked_peer != None):\n unchoked_peers.add(self.optimistically_unchoked_peer)\n \n bws = []\n if len(unchoked_peers) > 0:\n bws = even_split(self.up_bw, len(unchoked_peers))\n else:\n # don't allocate bandwidth if no peers are unchoked\n bws = [0 for _ in range (len(unchoked_peers))]\n\n uploads = [Upload(self.id, peer_id, bw)\n for (peer_id, bw) in zip(unchoked_peers, bws)]\n\n return uploads",
"def send_append_entries(self):\n\n assert self.role == Role.Leader\n\n for peer in self.peers:\n prev_index = self.next_index[peer]\n\n self.set_rpc_timeout(peer)\n\n # After the rpc, the node will have the entire log\n self.next_index[peer] = len(self.log)\n\n self.orchestrator.send_to_broker(\n AppendEntries(\n self.name,\n [peer],\n self.current_term,\n self.leader,\n self.next_index[peer] - 1,\n self.log.term(self.next_index[peer] - 1),\n self.log.entries[prev_index : self.next_index[peer]],\n self.commit_index,\n )\n )",
"def send_route_refresh(peer_ip):\n LOG.debug('Try to send route refresh to peer %s', peer_ip)\n json_request = flask.request.get_json()\n if 'afi' in json_request and 'safi' in json_request:\n if 'res' not in json_request:\n res = 0\n else:\n res = json_request['res']\n result = api_utils.send_route_refresh(\n peer_ip=peer_ip, afi=json_request['afi'], safi=json_request['safi'], res=res)\n return flask.jsonify(result)\n return flask.jsonify({\n 'status': False,\n 'code': 'please check your post data'\n })",
"def update_global_file_directory():\n try:\n query_id = random.randint(0,sys.maxsize)\n now_time = \"\".join(str(datetime.datetime.now()).split(\" \"))\n waiting_query_ids.append([query_id,now_time])\n\n for i in range(len(STRONG_PEERS)):\n if i != STRONG_PEER_ID:\n passing_message(i, f\"TIME:{now_time} QUERY_ID:{query_id} FROM:{STRONG_PEER_ID} TO:{i} QUERY:file_list DATA:{json.dumps(local_peer_files)}\") \n except Error as e:\n print(e)",
"def add_tn_resources(self, slice_urn, nodes, links, peer_info):\n if slice_urn not in self.__stored:\n logger.error(\"Slice monitoring: unable to find Topology info from %s!\" % slice_urn)\n return\n\n topology = self.__stored.get(slice_urn)\n\n logger.debug(\"add_tn_resources Nodes=%d, PeerInfo=%s\" %\n (len(nodes), peer_info,))\n # Iterate over the TN nodes to fetch its component ID, interface and associated VLAN tag\n for n in nodes:\n logger.debug(\"Node=%s\" % (n,))\n\n node_ = etree.SubElement(\n topology, \"node\", id=n.get(\"component_id\"), type=self.TN2TN_LINK_TYPE)\n\n for ifs in n.get(\"interfaces\"):\n etree.SubElement(\n node_, \"interface\", id=ifs.get(\"component_id\"))\n vlan = ifs.get(\"vlan\")[0].get(\"tag\")\n m_ = etree.SubElement(node_, \"match\")\n etree.SubElement(m_, \"vlan\", start=vlan, end=vlan)\n\n self.__add_rest_management(\n node_, peer_info.get(\"address\"), peer_info.get(\"port\"),\n peer_info.get(\"protocol\"))\n\n # MRO: TN links are transmitted at this layer\n if self.mro_enabled:\n logger.debug(\"add_tn_resources Links=%d\" % (len(links),))\n for l in links:\n logger.debug(\"Link=%s\" % (l,))\n # In order to avoid duplication in case of bidirectional links\n # we use the attributes of the first \"property\" here!\n if len(l.get(\"property\")) >= 1:\n p = l.get(\"property\")[0]\n #self.__add_link_info(topology, p.get(\"source_id\"), p.get(\"dest_id\"), self.TN2TN_LINK_TYPE)\n # store the values for the virtual island-to-island link\n self.__tn_links.append(\n {'id': l.get(\"component_id\"),\n 'source': p.get(\"source_id\"),\n 'destination': p.get(\"dest_id\")})",
"def _update_bgp_speaker_tenant_network_routes(self, context,\n bgp_speaker_id, topic):\n\n prefixes = self._get_tenant_network_routes_by_bgp_speaker(\n context, bgp_speaker_id)\n # Translate to the format of dragonflow db data.\n routes = [{'destination': x['destination'],\n 'nexthop': x['next_hop']} for x in prefixes]\n bgp_speaker = self.nb_api.get(bgp.BGPSpeaker(id=bgp_speaker_id,\n topic=topic))\n\n bgp_speaker.prefix_routes = routes\n self.nb_api.update(bgp_speaker, skip_send_event=True)",
"def calculate_trust(self):\n for remote_peer in self.router.peers:\n new_trust = self.t(self.router.node, remote_peer)\n self.messages.append(\"Recalculated trust of %s as %.4f.\" %\\\n (remote_peer, new_trust))\n remote_peer.trust = new_trust\n # AC = self.aggregate_trust()\n self.read_messages()\n # log(AC)",
"def R1(self, i):\n results = []\n for peer in self.router:\n remotes_peers = self.get(peer)\n for friend_of_a_friend in remotes_peers:\n if friend_of_a_friend['node'] == i.threeple and friend_of_a_friend['transactions']:\n results.append(peer)\n log(\"R1: %s %s\" % (i, str(results)))\n return results",
"def getPeers(self, peerType):\r\n raise NotImplementedError()",
"def getConnectedPeers(self, peerType):\r\n raise NotImplementedError()"
]
| [
"0.68900126",
"0.64145476",
"0.5670051",
"0.52405006",
"0.49201214",
"0.48695308",
"0.48459294",
"0.4836272",
"0.4804749",
"0.4720686",
"0.46841615",
"0.46329033",
"0.46113852",
"0.45804277",
"0.45031074",
"0.44983274",
"0.44929075",
"0.44098863",
"0.43394423",
"0.43062857",
"0.4289284",
"0.42796677",
"0.4276042",
"0.4237236",
"0.42367345",
"0.42174497",
"0.42101783",
"0.42089832",
"0.42070326",
"0.41870254"
]
| 0.7465844 | 0 |
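The query/document pair in the record above describes an RT NLRI advertisement filter: locally originated RT NLRI best paths are sent to a peer only when their origin AS equals that peer's configured RTC_AS, remotely learned RT NLRIs are always sent, and an End-of-RIB marker follows. The standalone Python sketch below restates just that selection rule under simplifying assumptions; RtNlriPath and select_rt_nlri_paths are illustrative stand-ins, not the BGP speaker classes shown in the record.

# Minimal sketch of the RT NLRI selection rule described above (assumed, simplified types).
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class RtNlriPath:
    origin_as: int
    source: Optional[str]  # None means the path was originated locally


def select_rt_nlri_paths(paths: List[RtNlriPath], peer_rtc_as: int) -> List[RtNlriPath]:
    """Return the RT NLRI best paths that should be advertised to a peer with the given RTC_AS."""
    selected = []
    for path in paths:
        if path.source is None:
            # Local path: advertise only when its origin AS matches the peer's RTC_AS.
            if path.origin_as == peer_rtc_as:
                selected.append(path)
        else:
            # Remote path: always advertise.
            selected.append(path)
    return selected


if __name__ == '__main__':
    candidates = [
        RtNlriPath(origin_as=65001, source=None),        # local, matching RTC_AS -> sent
        RtNlriPath(origin_as=65002, source=None),        # local, non-matching -> filtered out
        RtNlriPath(origin_as=65003, source='10.0.0.2'),  # remote -> always sent
    ]
    for p in select_rt_nlri_paths(candidates, peer_rtc_as=65001):
        print(p)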
Shares/communicates current best paths with this peer. Can be used to send initial updates after we have established a session with `peer`. | def comm_all_best_paths(self, peer):
LOG.debug('Communicating current best path for all afi/safi except'
' 1/132')
# We will enqueue best path from all global destination.
for route_family, table in self._table_manager.iter:
if route_family == RF_RTC_UC:
continue
if peer.is_mbgp_cap_valid(route_family):
for dest in table.values():
if dest.best_path:
peer.communicate_path(dest.best_path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comm_new_best_to_bgp_peers(self, new_best_path):\n # Filter based on standard community\n # If new best path has community attribute, it should be taken into\n # account when sending UPDATE to peers.\n comm_attr = new_best_path.get_pattr(BGP_ATTR_TYPE_COMMUNITIES)\n if comm_attr:\n comm_attr_na = comm_attr.has_comm_attr(\n BGPPathAttributeCommunities.NO_ADVERTISE\n )\n # If we have NO_ADVERTISE attribute is present, we do not send\n # UPDATE to any peers\n if comm_attr_na:\n LOG.debug('New best path has community attr. NO_ADVERTISE = %s'\n '. Hence not advertising to any peer', comm_attr_na)\n return\n\n qualified_peers = self._collect_peers_of_interest(\n new_best_path\n )\n\n # Distribute new best-path to qualified peers.\n for peer in qualified_peers:\n peer.communicate_path(new_best_path)",
"def _collect_peers_of_interest(self, new_best_path):\n path_rts = new_best_path.get_rts()\n qualified_peers = set(self._peers.values())\n\n # Filter out peers based on RTC_AS setting if path is for RT_NLRI\n qualified_peers = self._rt_manager.filter_by_origin_as(\n new_best_path, qualified_peers\n )\n\n # We continue to filter out qualified peer based on path RTs\n # If new best path has RTs, we need to share this UPDATE with\n # qualifying peers\n if path_rts:\n # We add Default_RTC_NLRI to path RTs so that we can send it to\n # peers that have expressed interest in all paths\n path_rts.append(RouteTargetMembershipNLRI.DEFAULT_RT)\n # All peers that do not have RTC capability qualify\n qualified_peers = set(self._get_non_rtc_peers())\n # Peers that have RTC capability and have common RT with the path\n # also qualify\n peer_to_rtfilter_map = self._peer_to_rtfilter_map\n for peer, rt_filter in peer_to_rtfilter_map.items():\n # Ignore Network Controller (its not a BGP peer)\n if peer is None:\n continue\n\n if rt_filter is None:\n qualified_peers.add(peer)\n elif rt_filter.intersection(path_rts):\n qualified_peers.add(peer)\n\n return qualified_peers",
"async def store_peers(self, peer: Peer):\n await self.peers.store(peer)",
"def resend_sent(self, route_family, peer):\n if peer not in self._peers.values():\n raise ValueError('Could not find given peer (%s)' % peer)\n\n if route_family not in SUPPORTED_GLOBAL_RF:\n raise ValueError(\n 'Given route family (%s) is not supported.' % route_family\n )\n\n # Iterate over the global table for given afi, safi and enqueue\n # out-going routes.\n table = self._table_manager.get_global_table_by_route_family(\n route_family\n )\n\n for destination in table.values():\n # Check if this destination's sent - routes include this peer.\n # i.e. check if this destinations was advertised and enqueue\n # the path only if it was. If the current best-path has not been\n # advertised before, it might already have a OutgoingRoute queued\n # to be sent to the peer.\n sent_routes = destination.sent_routes\n if sent_routes is None or len(sent_routes) == 0:\n continue\n for sent_route in sent_routes:\n if sent_route.sent_peer == peer:\n # update med - if previously med was set per neighbor or\n # wasn't set at all now it could have changed and we may\n # need to set new value there\n p = sent_route.path\n if p.med_set_by_target_neighbor or p.get_pattr(\n BGP_ATTR_TYPE_MULTI_EXIT_DISC) is None:\n sent_route.path = \\\n clone_path_and_update_med_for_target_neighbor(\n sent_route.path, peer.med\n )\n\n ogr = OutgoingRoute(sent_route.path,\n for_route_refresh=True)\n peer.enque_outgoing_msg(ogr)",
"def change_way(coins, opponentLocation, player_location):\n global best_weight, best_path\n dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n coins_to_search = get_n_shortest(5, coins, player_location, dists_matrix)\n ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n for c in coins_to_search:\n if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n coins_to_search.remove(c)\n break\n best_weight = float(\"inf\")\n best_path = []\n api.debug(coins_to_search)\n exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n meta_route = [player_location] + best_path\n api.debug(meta_route)\n route = u.location_list_to_route(meta_route, route_matrix)\n \n return coins_to_search, meta_route, route, dist_matrix[player_location][meta_route[1]]",
"def client():\t\n\t#Gets the requested file from the server address given\n\tdef getFile(serverAddr):\n\t\t\ttry: \n\t\t\t\t#connect to peer server\n\t\t\t\tpeerServer = socket.socket()\n\t\t\t\tpeerServer.connect((serverAddr, PEER_SERVER_PORT))\n\t\t\t\tpickled = pickle.dumps(requestedFile)\n\t\t\t\t#Ask for file \n\t\t\t\tpeerServer.send(pickled)\n\t\t\t\t#get file size\n\t\t\t\tfilesize = peerServer.recv(1024)\n\t\t\t\tfilesize = pickle.loads(filesize)\n\t\t\texcept socket.error as err:\n\t\t\t\tprint(\"- Peer Client: Problem connecting to peer server: \" + str(serverAddr))\n\t\t\t\t\n\t\t\t\t\n\t\t\t#if filesize == 0: then file does not exist\n\t\t\tif filesize == 0:\n\t\t\t\tprint(\"- Peer Client: Requested file was not found on peer server: \" + serverAddr)\n\t\t\t\treturn\n\t\n\t\n\t\t\t#Download file\n\t\t\twith open(LOCAL_SHARED_FILE_SPACE + \"\\\\\" + requestedFile, 'wb') as file:\n\t\t\t\twhile True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdata = peerServer.recv(1024) \n\t\t\t\t\texcept socket.error as err:\n\t\t\t\t\t\tprint(\"- Peer Client: Download interrupted\")\n\t\t\t\t\t\treturn\n\t\t\t\t\tif len(data) == 0: break\n\t\t\t\t\t#data = pickle.loads(data)\n\t\t\t\t\tfile.write(data)\n\t\t\tprint(\"+ Peer Client: retreived file: '\" + str(requestedFile) + \"'\")\n\n\tdef getCatalog(sock):\n\t\t#connect to index server\n\t\ttry: \n\t\t\tsock.connect((INDEX_SERVER_IP, INDEX_SERVER_PORT))\n\t\t\tsock.send(pickle.dumps((3, \"search\")))\n\t\t\tmsg = \"\\n\"\n\t\t\t\n\t\t\t#get string of all the file names\n\t\t\twhile True:\n\t\t\t\tdata = sock.recv(1024)\n\t\t\t\tmsg = msg + data.decode()\n\t\t\t\tif len(data) < 1024: break\n\t\t\t\t\n\t\t\t#give file names to user\n\t\t\tprint(msg)\n\t\texcept socket.error as err:\n\t\t\tprint(\"- Peer Client: failed to contact index server, aborting\")\n\t\t\treturn -1\n\t\t\t\n\tdef clientSearch(sock, requestedFile, flag):\n\t\t\t#connect to index server\n\t\t\ttry: \n\t\t\t\tsock.connect((INDEX_SERVER_IP, INDEX_SERVER_PORT))\n\t\t\t\t#search index server\n\t\t\t\tpickled = pickle.dumps((SEARCH_FILES, requestedFile))\n\t\t\t\tsock.send(pickled) \n\t\t\texcept socket.error as err:\n\t\t\t\tprint(\"- Peer Client: failed to contact index server, aborting\")\n\t\t\t\treturn -1\n\t\t\n\t\n\t\t\t#TODO: what if reponse is larger than 1024\n\t\t\ttry: \n\t\t\t\tresponse = sock.recv(1024)\n\t\t\t\tresponse = pickle.loads(response)\n\t\t\texcept socket.error as err:\n\t\t\t\tprint(\"- Peer Client: failed to contact index server, aborting\")\n\t\t\t\treturn -1\n\t\t\tsock.close()\n\t\t\tif len(response) == 0:\n\t\t\t\tprint(\"- Peer Client: File not registered with index server\")\n\t\t\t\treturn -1\n\t\t\telif flag == 1:\n\t\t\t\tserverAddr = response[random.randint(0,len(response) - 1)]\n\t\t\t\tprint(\"+ Peer Client: Looking for '\" + str(requestedFile) + \"' on: \" + str(serverAddr))\n\t\t\t\tgetFile(serverAddr)\n\t\t\telse:\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\t\t\t\n\tglobal PEER_SERVER_STATUS\n\ttimeTotal = 0\n\twhile get_PEER_SERVER_STATUS() == 0:\n\t\tsleep(1)\n\t\tif get_PEER_SERVER_STATUS() == 1: break\n\t\telif get_PEER_SERVER_STATUS() == -1: return -1\n\t\n\t#create client socket\n\tprint(\"+ Peer Client: Client starting...\")\n\tsock = createClientSocket()\n\tif sock == -1: \n\t\tprint(\"- Peer Client: Client failed to start\")\n\t\treturn -1\n\t\n\t#################################################\n\t##\n\t##\tinput loop with peer server-client protocol:\n\t##\t\t1. SEND: requested file name to index server\n\t##\t\t2. 
RECV: list of peers that host that file\n\t##\t\t3. SEND: file name to peer server\n\t##\t\t4. RECV: file size\n\t##\t\t5. RECV: file\n\t##\n\t##\t- If list of peers that the index server sends is empty\n\t##\tthen there are no peers hosting that file\n\t##\t- If the file size returned by the peer server is 0\n\t## \tthen the peer does not have the file\n\t##\n\t#################################################\n\tprint(\"+ Peer Client: Client started\")\n\twhile True:\n\t\tprint(\"\\n\")\n\t\tprint(\"----------- Special Commands -----------\")\n\t\tprint(\"search: retreives list of registered files\")\n\t\tprint(\"exit: exits the program\")\n\t\tprint(\"----------------------------------------\")\n\t\trequestedFile = input(\"What file do you want?\\n\")\n\t\tif requestedFile == 'exit':\n\t\t\tsock.close()\n\t\t\tset_PEER_CLIENT_STATUS(-1)\n\t\t\tbreak\n\t\telif requestedFile == 'search':\n\t\t\tgetCatalog(sock)\n\t\t\tsock.close()\n\t\t\tsock = createClientSocket()\n\t\telif requestedFile == 'test.txt':\n\t\t\ttest = open(\"test.txt\", \"r\")\n\t\t\tprint(\"test file opened\")\n\t\t\twhile True:\n\t\t\t\tsock = createClientSocket()\n\t\t\t\tif sock == -1: \n\t\t\t\t\treturn -1\n\t\t\t\ttimeStart = time.time()\n\t\t\t\trequestedFile = test.readline().strip()\n\t\t\t\tif requestedFile == '':\n\t\t\t\t\tbreak\n\t\t\t\tprint(\"Searching for file: \" + requestedFile)\n\t\t\t\tclientSearch(sock, requestedFile, 0)\n\t\t\t\ttimeTotal += (time.time() - timeStart)\n\t\t\ttimeAvg = timeTotal/500\n\t\t\tprint(\"Average search request in seconds: \" + str(timeAvg))\n\t\t\t \n\t\t\t\t\n\t\telse:\n\t\t\tclientSearch(sock, requestedFile, 1)\n\t\t\tsock.close()\n\t\t\tsock = createClientSocket()\n\t\t\n\t\t#connect to index server\n\t\t\"\"\"\n\t\ttry: \n\t\t\tsock.connect((INDEX_SERVER_IP, INDEX_SERVER_PORT))\n\t\texcept socket.error as err:\n\t\t\tprint(\"- Peer Client: failed to contact index server, aborting\")\n\t\t\tcontinue\n\t\t\n\t\t#search index server\n\t\tpickled = pickle.dumps((SEARCH_FILES, requestedFile))\n\t\tsock.send(pickled) # TODO: add try\n\t\t\n\t\t#TODO: what if reponse is larger than 1024\n\t\tresponse = sock.recv(1024)\t#TODO: add try\n\t\tresponse = pickle.loads(response)\n\t\tsock.close()\n\t\tsock = createClientSocket()\n\t\tif sock == -1: return -1\n\t\tif len(response) == 0:\n\t\t\tprint(\"+ Peer Client: File not registered with index server\")\n\t\t\tcontinue\n\t\t#TODO: add error handling for if empty list is sent\n\t\t#print(\"looking on server: \" + str(response[0])) \n\t\ttry: \n\t\t\tpeerServer = socket.socket()\n\t\t\tpeerServer.connect((response[0], PEER_SERVER_PORT))\n\t\t\tpickled = pickle.dumps(requestedFile)\n\t\t\tpeerServer.send(pickled)\n\t\t\tfilesize = peerServer.recv(1024) # TODO: add try\n\t\t\tfilesize = pickle.loads(filesize)\n\t\texcept socket.error as err:\n\t\t\tprint(\"- Peer Client: Problem connecting to peer server: \" + str(response[0]))\n\t\t\tcontinue\n\t\t#print(\"filesize: \")\n\t\t#print(filesize)\n\t\tif filesize == 0:\n\t\t\tprint(\"- Peer Client: Requested file was not found on peer server: \" + response[0])\n\t\t\n\t\t\n\t\t#parse index server results\n\t\twith open(LOCAL_SHARED_FILE_SPACE + \"\\\\\" + requestedFile, 'w') as file:\n\t\t\twhile True:\n\t\t\t\tdata = peerServer.recv(1024) # TODO: add try\n\t\t\t\tif len(data) == 0: break\n\t\t\t\tdata = pickle.loads(data)\n\t\t\t\tfile.write(data)\n\t\t\tcontinue\n\t\t\t\"\"\"",
"def resolve(self):\n # Initialize the winner chain with the local chain\n winner_chain = self.chain\n replace = False\n for node in self.__peer_nodes:\n url = 'http://{}/chain'.format(node)\n try:\n # Send a request and store the response\n response = requests.get(url)\n # Retrieve the JSON data as a dictionary\n node_chain = response.json()\n # Convert the dictionary list to a list of block AND transaction objects\n node_chain = [Block(block['index'], block['previous_hash'], [Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']],\n [Chipsaction(tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount']) for tx in block['chipsactions']],\n [Messsaction(tx['sender'], tx['follower'], tx['message'], tx['signature']) for tx in block['messsactions']],\n block['proof'], block['timestamp']) for block in node_chain]\n node_chain_length = len(node_chain)\n local_chain_length = len(winner_chain)\n # Store the received chain as the current winner chain if it's longer AND valid\n if node_chain_length > local_chain_length and Verification.verify_chain(node_chain):\n winner_chain = node_chain\n replace = True\n except requests.exceptions.ConnectionError:\n continue\n self.resolve_conflicts = False\n # Replace the local chain with the winner chain\n self.chain = winner_chain\n if replace:\n self.__open_transactions = []\n self.__open_chipsactions = []\n self.__open_messsactions = []\n self.save_data()\n return replace",
"def uploads(self, requests, peers, history):\n\n round = history.current_round()\n logging.debug(\"%s again. It's round %d.\" % (\n self.id, round))\n # One could look at other stuff in the history too here.\n # For example, history.downloads[round-1] (if round != 0, of course)\n # has a list of Download objects for each Download to this peer in\n # the previous round.\n\n chosen = []\n bws = []\n if len(requests) == 0:\n logging.debug(\"No one wants my pieces!\")\n else:\n if round == 0:\n chosen = [request.requester_id for request in requests]\n bws = even_split(self.up_bw, len(chosen))\n else:\n requester_ids = [request.requester_id for request in requests]\n # requester_id : blocks given to us in last round\n last_dls = {}\n # find peers who unchoked me and update\n for dl in history.downloads[round-1]:\n # update peer with observed flow from peer if peer is a requester\n if dl.from_id in requester_ids:\n last_dls[dl.from_id] = dl.blocks\n\n # smallest to largest\n sorted_ids = sorted(last_dls, key=lambda k: last_dls[k], reverse=False)\n #if len(sorted_ids) > 3:\n # sorted_ids = sorted_ids[:2]\n total_dl = sum([last_dls[k] for k in sorted_ids])\n\n for chosen_peer in sorted_ids:\n chosen.append(chosen_peer)\n ratio = float(last_dls[chosen_peer])/float(total_dl)\n bw = ratio*self.percentage*self.up_bw\n bws.append(bw)\n\n others = list(set(requester_ids) - set(sorted_ids))\n if len(others) > 0:\n optimistic = random.choice(others)\n chosen.append(optimistic)\n bws.append(self.up_bw-sum(bws))\n\n # create actual uploads out of the list of peer ids and bandwidths\n uploads = [Upload(self.id, peer_id, bw)\n for (peer_id, bw) in zip(chosen, bws)]\n \n return uploads",
"def setPeer (self, peer):\n\t\tself.peer = peer",
"async def _sync(self) -> None:\n # Ensure we are only syncing once and not double calling this method\n if self.sync_store.get_sync_mode():\n return None\n\n if self.sync_store.get_long_sync():\n self.log.debug(\"already in long sync\")\n return None\n\n self.sync_store.set_long_sync(True)\n self.log.debug(\"long sync started\")\n try:\n self.log.info(\"Starting to perform sync.\")\n self.log.info(\"Waiting to receive peaks from peers.\")\n\n # Wait until we have 3 peaks or up to a max of 30 seconds\n peaks = []\n for i in range(300):\n peaks = [peak.header_hash for peak in self.sync_store.get_peak_of_each_peer().values()]\n if len(self.sync_store.get_peers_that_have_peak(peaks)) < 3:\n if self._shut_down:\n return None\n await asyncio.sleep(0.1)\n continue\n break\n\n self.log.info(f\"Collected a total of {len(peaks)} peaks.\")\n\n # Based on responses from peers about the current peaks, see which peak is the heaviest\n # (similar to longest chain rule).\n target_peak = self.sync_store.get_heaviest_peak()\n\n if target_peak is None:\n raise RuntimeError(\"Not performing sync, no peaks collected\")\n\n self.sync_store.target_peak = target_peak\n\n self.log.info(f\"Selected peak {target_peak}\")\n # Check which peers are updated to this height\n\n peers = self.server.get_connections(NodeType.FULL_NODE)\n coroutines = []\n for peer in peers:\n coroutines.append(\n peer.call_api(\n FullNodeAPI.request_block,\n full_node_protocol.RequestBlock(target_peak.height, True),\n timeout=10,\n )\n )\n for i, target_peak_response in enumerate(await asyncio.gather(*coroutines)):\n if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):\n self.sync_store.peer_has_block(\n target_peak.header_hash, peers[i].peer_node_id, target_peak.weight, target_peak.height, False\n )\n # TODO: disconnect from peer which gave us the heaviest_peak, if nobody has the peak\n fork_point, summaries = await self.request_validate_wp(\n target_peak.header_hash, target_peak.height, target_peak.weight\n )\n # Ensures that the fork point does not change\n async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):\n await self.blockchain.warmup(fork_point)\n await self.sync_from_fork_point(fork_point, target_peak.height, target_peak.header_hash, summaries)\n except asyncio.CancelledError:\n self.log.warning(\"Syncing failed, CancelledError\")\n except Exception as e:\n tb = traceback.format_exc()\n self.log.error(f\"Error with syncing: {type(e)}{tb}\")\n finally:\n if self._shut_down:\n return None\n await self._finish_sync()",
"def passing_message(target, message):\n \n shortest_path = strong_peer_graph.findShortestPath(STRONG_PEER_ID,target)\n next_node = shortest_path[1]\n\n send_message(neighbor_strong_peer_sockets[next_node],'', message)",
"def uploads(self, requests, peers, history):\n\n round = history.current_round()\n logging.debug(\"%s again. It's round %d.\" % (\n self.id, round))\n # One could look at other stuff in the history too here.\n # For example, history.downloads[round-1] (if round != 0, of course)\n # has a list of Download objects for each Download to this peer in\n # the previous round.\n\n if len(requests) == 0:\n logging.debug(\"No one wants my pieces!\")\n chosen = []\n bws = []\n else:\n logging.debug(\"Still here: uploading to a random peer\")\n # change my internal state for no reason\n self.dummy_state[\"cake\"] = \"pie\"\n # filler value for peers for whom we have no historical data: 1, assume perfect collab\n # divide the upload and download rates to get the ratios\n requester_ratios = {} # keys: requester ids, values: requester ratios\n random.shuffle(requests) # break symmetries again with random shuffling, for extra safety\n for r in requests:\n # if not in peer_ratios already, initialize their values in peer_ratios\n if r.requester_id not in self.peer_ratios.keys(): \n blocks_uploaded_lst = [] # amount of blocks uploaded to this requester in past\n for rnd in history.uploads:\n if rnd != []:\n blocks_uploaded_lst += [u.bw for u in rnd if u.to_id == r.requester_id]\n # initialize values of u and d\n self.peer_ratios[r.requester_id] = {\"u\": 1, \"d\": 1} # previously 1\n if len(blocks_uploaded_lst) != 0:\n self.peer_ratios[r.requester_id][\"u\"] = (len(blocks_uploaded_lst)/round)/4\n self.peer_ratios[r.requester_id][\"d\"] = (len(blocks_uploaded_lst)/round)/4\n\n requester_ratios[r.requester_id] = self.peer_ratios[r.requester_id]\\\n [\"d\"]/self.peer_ratios[r.requester_id][\"u\"] \n\n # sort this dictionary in descending order, get list of tuples (requester_id, ratio)\n requester_ratios_sorted = sorted(requester_ratios.items(), key=lambda x: x[1], \\\n reverse=True)\n # Now we allocate according to this order \n # we allocate the denomiator of each ratio\n # until we hit the max cap, or until we hit the end of all requesters\n chosen = []\n bws = []\n sum_up = 0\n counter = 0\n # FIXME: STILL EXCEEDING BANDWIDTH HERE!!!\n \n while sum_up < self.up_bw and \\\n counter < len(requester_ratios_sorted):\n pid = requester_ratios_sorted[counter][0]\n peer_bw = self.peer_ratios[pid][\"u\"]\n if peer_bw > 0:\n chosen.append(pid)\n bws.append(peer_bw)\n sum_up += peer_bw\n counter += 1\n\n # assumption: if there is any bandwidth remainder, give it all to the top ranked peer\n if sum_up < self.up_bw:\n bws[0] += math.floor(self.up_bw - sum_up)\n\n # assumptiopn: clean up: it's possible with this loop structure to over-allocate bandwidth\n # remove extra bandwidth by deleting the agents with least amount\n # this will also overshoot slightly in some cases . . . 
add back bandwidth to top\n # agent again so we use exactly all of it\n if sum_up > self.up_bw*self.threshold:\n lost_bw = bws.pop()\n while lost_bw < (sum_up - self.up_bw*self.threshold) and bws != []:\n lost_bw = bws.pop()\n\n # devote the rest of bandwidth to 2 unchoked peers, evenly split between them\n extra_bandwidth = math.floor(self.up_bw - sum(bws))\n choked_peers = set([r.requester_id for r in requests]) - set(chosen)\n if len(choked_peers) == 0 and len(bws) != 0:\n val = math.floor(extra_bandwidth/len(bws))\n bws = [x + val for x in bws]\n else:\n n = min(len(choked_peers), 2)\n chosen.append(random.sample(choked_peers, n))\n bws.append(even_split(extra_bandwidth, n))\n\n # update the estimates of upload and download rates\n for (pid, _rate_dict) in self.peer_ratios.items():\n # boolean indicator for whether peer unchoked me in last round\n unchoked_met1_bool = (pid in [d.from_id for d in history.downloads[round-1]])\n # now create a bool to indicate whether this peer unchoked me in the last r periods\n unchoked_metr_bool = True\n for r in range(round, max(0, round-self.r)): \n if pid not in [d.from_id for d in history.downloads[r]]:\n unchoked_metr_bool = False\n \n # update as described in textbook\n if not unchoked_met1_bool:\n self.peer_ratios[pid][\"u\"] *= (1 + self.alpha)\n if unchoked_met1_bool:\n # update based on the amount of observed download rates\n downloads_from_peer = [d for d in history.downloads[round-1] if d.from_id == pid]\n total_blocks = len(set([d.blocks for d in downloads_from_peer])) \n self.peer_ratios[pid][\"d\"] += ((self.peer_ratios[pid][\"d\"]) -\n (round/(round-1)**2) + (total_blocks/(round-1)))/(round/(round-1)) \n # I worked out this math, please see the writeup\n # denominator of previous rate value will always be current_round-1\n # I want to add the rate in blocks/round to the old rate in order to update the download rate\n if unchoked_metr_bool:\n self.peer_ratios[pid][\"u\"] *= (1 - self.gamma)\n\n # create actual uploads out of the list of peer ids and bandwidths\n uploads = [Upload(self.id, peer_id, bw)\n for (peer_id, bw) in zip(chosen, bws)]\n \n return uploads",
"def __request_virdir(self):\n for pn in self.peernames:\n with socket.socket() as tmpsock:\n tmpsock.connect(tuple(pn))\n\n # Solicitud\n tmpsock.send(message.REQDIRMSG)\n header = tmpsock.recv(5)\n\n if header[0] != message.GIVEDIR:\n raise ValueError(\n \"[SHARE] error al solicitar archivos a\", pn)\n\n # Primero se le piden sus archivos\n bodysize = int.from_bytes(header[1:5], byteorder=\"big\")\n body = tmpsock.recv(bodysize)\n port, sharelist = message.parse_file_bytes(body, bodysize)\n self.__add_sharefiles(sharelist, pn)\n\n # Despues le comparte los suyos\n tmpsock.send(message.build_givedir_message(\n self.port, self.files))",
"def update_global_file_directory():\n try:\n query_id = random.randint(0,sys.maxsize)\n now_time = \"\".join(str(datetime.datetime.now()).split(\" \"))\n waiting_query_ids.append([query_id,now_time])\n\n for i in range(len(STRONG_PEERS)):\n if i != STRONG_PEER_ID:\n passing_message(i, f\"TIME:{now_time} QUERY_ID:{query_id} FROM:{STRONG_PEER_ID} TO:{i} QUERY:file_list DATA:{json.dumps(local_peer_files)}\") \n except Error as e:\n print(e)",
"def backTrack(metaGraph, startNode, path, deep):\n global bestDistance\n global bestPaths\n\n bestDistance = float('inf')\n bestPaths = []\n \n BT_auxi(nodeStart, nodes, distance, path)\n return bestDistance, bestPaths",
"def ResortPeers(self):\n \n self.sortedPeerList = []\n append = self.sortedPeerList.append\n for i in self.peerDatabase.keys():\n append((self.peerDatabase[i].RemainingRemoteStorage(), i))\n self.sortedPeerList.sort()\n self.sortedPeerList.reverse()",
"def Broadcast(self, method, *args, **kwargs):\n for peer_id, (host, port, peer) in self.peers.iteritems():\n logging.debug('Calling method %r on peer %r.' % (method, peer_id))\n m = getattr(peer, method)\n m(self.peer_id, *args, **kwargs)",
"def message_sent(self, message):\n with self.app.peers_lock:\n peer = self.app.peers[message.to]\n peer.state_connecting = True",
"def unison_sync(paths_to_sync):\n log('Synchronizing CA (%s) to all peers.' % (', '.join(paths_to_sync)),\n level=INFO)\n keystone_gid = grp.getgrnam('keystone').gr_gid\n\n # NOTE(dosaboy): This will sync to all peers who have already provided\n # their ssh keys. If any existing peers have not provided their keys yet,\n # they will be silently ignored.\n unison.sync_to_peers(peer_interface='cluster', paths=paths_to_sync,\n user=SSH_USER, verbose=True, gid=keystone_gid,\n fatal=True)\n\n synced_units = peer_units()\n if len(unison.collect_authed_hosts('cluster')) != len(synced_units):\n log(\"Not all peer units synced due to missing public keys\", level=INFO)\n return None\n else:\n return synced_units",
"def syncFromClient(self):\n\n # Acquire the client thread semaphore\n S_SEM.acquire()\n self.updateIndex()\n try:\n # Wait for signal then sends server's directory\n print('Started sync from client...')\n self.wait('OK')\n self.send(LOCAL_DIR)\n\n # Encode, wait for signal then send index to client\n outpkg = json.dumps(self.serverindex)\n self.wait('OK')\n self.send(outpkg)\n\n # Receive requests and files from client\n Q_LOCK.acquire()\n while True:\n request = self.receive()\n if request:\n job = tuple(request.split(','))\n self.send('OK')\n\n # Atomically add a single batch of sync jobs\n # Wait and receive file for all copy jobs\n # Put job and file in queue\n if job[0] == 'CP':\n file = self.receive(isFile=True)\n self.send('OK')\n self.jobqueue.append((job, file))\n\n # Finish adding jobs to the client\n elif job[0] == 'DONE':\n self.jobqueue.append((job, None))\n print('Done syncing from client!')\n Q_LOCK.release()\n break\n\n # Put job into jobqueue if not copy job\n else:\n self.jobqueue.append((job, None))\n\n # Start worker thread that will write to the local directory\n # Release the semaphore for the worker thread\n workerthread = WorkerThread(self.jobqueue, self)\n workerthread.start()\n THREADS['WorkerThread[{}]'.format(self.threadID)] = workerthread\n W_SEM.release()\n workerthread.join()\n self.updateIndex()\n except:\n S_SEM.release()\n self.updateIndex()",
"def set_peer_working(self, peer_id):\n self.peers[peer_id].set_working_state()",
"def execute(self):\n with self.app.peers_lock:\n for peer in self.app.peers.values():\n\n # Skip peers that have no chance at connecting.\n if peer.host is None:\n logger.log(TRACE, \"%s will not be connected as it \"\n \"doens't have a host set\", peer)\n continue\n\n if peer.state_connecting:\n self.connecting_peer(peer)\n elif peer.state_initial:\n self.connect_peer(peer)\n elif peer.state_no_connection:\n self.reconnect_peer(peer)",
"def calculateNewPath(self):\r\n\r\n\t\tnodeDict = self.simulationHandle.getMap().getNodeDict()\r\n\t\tdistDict = self.simulationHandle.getMap().getDistDict()\r\n\r\n\t\tself.pathToGoal = pathfinder.findPath(self.currentNode, self.goalNode, nodeDict, distDict)",
"def _peer_url(self, path):\r\n return \"http://127.0.0.1:{port}/peer_grading/{path}/\".format(\r\n port=self.server.port, path=path\r\n )",
"def uploads(self, requests, peers, history):\n \n\n round = history.current_round()\n # logging.debug(\"%s again. It's round %d.\" % (\n # self.id, round))\n\n # if no requests are made, then agent does not create any uploads\n if len(requests) == 0:\n return []\n \n # number of rounds to track in history to determine unchoke slots\n num_rounds_backtracking = 2\n num_unchoke_slots = int(math.sqrt(self.up_bw))\n\n # set of peers who get an unchoke slot\n unchoked_peers = set()\n\n # determine the list of peers who are requesting pieces from Agent\n requesting_peers = []\n for request in requests:\n if request.requester_id not in requesting_peers:\n requesting_peers.append(request.requester_id)\n\n \n # if round is less than 2 just randomly allocate unchoke slots, otherwise determine by highest download rate\n if (round < 2):\n chosen_peers = []\n if len(requesting_peers) >= num_unchoke_slots:\n chosen_peers = random.sample(requesting_peers,num_unchoke_slots)\n else:\n chosen_peers = requesting_peers\n for chosen_p in chosen_peers:\n unchoked_peers.add(chosen_p)\n\n else:\n # {peer: download_rate, .....}\n peer_by_download_rate_map = findPeerByDownloadRateInLastNRounds(\n num_rounds_backtracking, self, requesting_peers, history)\n\n # [(peer_id, download rate), ...] in descending order\n sorted_peer_by_download_rate = sorted(peer_by_download_rate_map.items(), key=lambda x:x[1], reverse=True)\n\n # find top 3 peers and their download rate\n for peer_id, download_rate in sorted_peer_by_download_rate[:num_unchoke_slots]:\n unchoked_peers.add(peer_id)\n\n # every 4th round, optimistically unchoke a peer that is not one of the top 3 peers\n if (round > 0 and round % 3 == 0 and len(requesting_peers) > len(unchoked_peers)):\n self.optimistically_unchoked_peer = random.choice(requesting_peers)\n while (self.optimistically_unchoked_peer in unchoked_peers):\n self.optimistically_unchoked_peer = random.choice(requesting_peers)\n unchoked_peers.add(self.optimistically_unchoked_peer) \n elif (self.optimistically_unchoked_peer != None):\n unchoked_peers.add(self.optimistically_unchoked_peer)\n \n bws = []\n if len(unchoked_peers) > 0:\n bws = even_split(self.up_bw, len(unchoked_peers))\n else:\n # don't allocate bandwidth if no peers are unchoked\n bws = [0 for _ in range (len(unchoked_peers))]\n\n uploads = [Upload(self.id, peer_id, bw)\n for (peer_id, bw) in zip(unchoked_peers, bws)]\n\n return uploads",
"def send_update(self, target_block, DIR):\r\n new_opts = []\r\n new_weights = []\r\n if len(self.block_opts) != 1:\r\n raise Exception (\"Improperly collapsed block!\")\r\n i = self.block_opts[0] #our state\r\n for k in range(len(target_block.block_opts)): #k is their state\r\n #print(\"Checking \",i,k,DIR)\r\n if check_allowed(i,target_block.block_opts[k],DIR):\r\n new_opts.append(target_block.block_opts[k])\r\n new_weights.append(target_block.block_weights[k])\r\n target_block.block_opts = new_opts\r\n n = sum(new_weights)\r\n target_block.block_weights = [x/n for x in new_weights]\r\n target_block.block_weights = new_weights\r\n target_block.arr = target_block.superposition()\r\n return",
"def send_path(self, path):\n self.clear_path()\n for coordinate in path:\n self.send_coordinate(coordinate)\n time.sleep(0.05)",
"def _send_request_user_best_stop(self):\n self.request(self.user_stop[\"route_id\"], self.user_stop[\"trip_headsign\"], self.user_stop[\"stop_chosen\"])",
"def update_path(self):\r\n if len(self.queue) == 0:\r\n return\r\n self.path[:] = []\r\n current = self.peek_queue()[0]\r\n while current in self.previous:\r\n self.path.append(current)\r\n current = self.previous[current]",
"def AddConnectedPeer(self, peer):\n # if present\n self.RemoveFromQueue(peer.address)\n self.AddKnownAddress(peer.address)\n\n if len(self.Peers) > settings.CONNECTED_PEER_MAX:\n peer.Disconnect(\"Max connected peers reached\", isDead=False)\n\n if peer not in self.Peers:\n self.Peers.append(peer)\n else:\n # either peer is already in the list and it has reconnected before it timed out on our side\n # or it's trying to connect multiple times\n # or we hit the max connected peer count\n self.RemoveKnownAddress(peer.address)\n peer.Disconnect()"
]
| [
"0.5883881",
"0.57443553",
"0.56123716",
"0.5499878",
"0.5477993",
"0.5329888",
"0.5321222",
"0.52948636",
"0.52582544",
"0.516415",
"0.50444376",
"0.50317",
"0.5019475",
"0.499753",
"0.49965003",
"0.49741516",
"0.4948319",
"0.4933094",
"0.49290287",
"0.49048895",
"0.49046937",
"0.48901987",
"0.48733118",
"0.48644933",
"0.48631778",
"0.48395592",
"0.48159808",
"0.4807351",
"0.47672057",
"0.47657046"
]
| 0.68576074 | 0 |
Communicates/enqueues the given best path to be sent to all qualifying BGP peers. If this path came from iBGP peers, it is not sent to other iBGP peers. If this path has a community attribute, and if the setting to recognize well-known attributes is set, we follow [RFC1997] and queue the outgoing route only to qualifying BGP peers. | def comm_new_best_to_bgp_peers(self, new_best_path):
# Filter based on standard community
# If new best path has community attribute, it should be taken into
# account when sending UPDATE to peers.
comm_attr = new_best_path.get_pattr(BGP_ATTR_TYPE_COMMUNITIES)
if comm_attr:
comm_attr_na = comm_attr.has_comm_attr(
BGPPathAttributeCommunities.NO_ADVERTISE
)
            # If the NO_ADVERTISE attribute is present, we do not send
# UPDATE to any peers
if comm_attr_na:
LOG.debug('New best path has community attr. NO_ADVERTISE = %s'
'. Hence not advertising to any peer', comm_attr_na)
return
qualified_peers = self._collect_peers_of_interest(
new_best_path
)
# Distribute new best-path to qualified peers.
for peer in qualified_peers:
peer.communicate_path(new_best_path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comm_all_best_paths(self, peer):\n LOG.debug('Communicating current best path for all afi/safi except'\n ' 1/132')\n # We will enqueue best path from all global destination.\n for route_family, table in self._table_manager.iter:\n if route_family == RF_RTC_UC:\n continue\n if peer.is_mbgp_cap_valid(route_family):\n for dest in table.values():\n if dest.best_path:\n peer.communicate_path(dest.best_path)",
"def _collect_peers_of_interest(self, new_best_path):\n path_rts = new_best_path.get_rts()\n qualified_peers = set(self._peers.values())\n\n # Filter out peers based on RTC_AS setting if path is for RT_NLRI\n qualified_peers = self._rt_manager.filter_by_origin_as(\n new_best_path, qualified_peers\n )\n\n # We continue to filter out qualified peer based on path RTs\n # If new best path has RTs, we need to share this UPDATE with\n # qualifying peers\n if path_rts:\n # We add Default_RTC_NLRI to path RTs so that we can send it to\n # peers that have expressed interest in all paths\n path_rts.append(RouteTargetMembershipNLRI.DEFAULT_RT)\n # All peers that do not have RTC capability qualify\n qualified_peers = set(self._get_non_rtc_peers())\n # Peers that have RTC capability and have common RT with the path\n # also qualify\n peer_to_rtfilter_map = self._peer_to_rtfilter_map\n for peer, rt_filter in peer_to_rtfilter_map.items():\n # Ignore Network Controller (its not a BGP peer)\n if peer is None:\n continue\n\n if rt_filter is None:\n qualified_peers.add(peer)\n elif rt_filter.intersection(path_rts):\n qualified_peers.add(peer)\n\n return qualified_peers",
"def resend_sent(self, route_family, peer):\n if peer not in self._peers.values():\n raise ValueError('Could not find given peer (%s)' % peer)\n\n if route_family not in SUPPORTED_GLOBAL_RF:\n raise ValueError(\n 'Given route family (%s) is not supported.' % route_family\n )\n\n # Iterate over the global table for given afi, safi and enqueue\n # out-going routes.\n table = self._table_manager.get_global_table_by_route_family(\n route_family\n )\n\n for destination in table.values():\n # Check if this destination's sent - routes include this peer.\n # i.e. check if this destinations was advertised and enqueue\n # the path only if it was. If the current best-path has not been\n # advertised before, it might already have a OutgoingRoute queued\n # to be sent to the peer.\n sent_routes = destination.sent_routes\n if sent_routes is None or len(sent_routes) == 0:\n continue\n for sent_route in sent_routes:\n if sent_route.sent_peer == peer:\n # update med - if previously med was set per neighbor or\n # wasn't set at all now it could have changed and we may\n # need to set new value there\n p = sent_route.path\n if p.med_set_by_target_neighbor or p.get_pattr(\n BGP_ATTR_TYPE_MULTI_EXIT_DISC) is None:\n sent_route.path = \\\n clone_path_and_update_med_for_target_neighbor(\n sent_route.path, peer.med\n )\n\n ogr = OutgoingRoute(sent_route.path,\n for_route_refresh=True)\n peer.enque_outgoing_msg(ogr)",
"def Optimum_prun_based_routing(self, S, D, L):\n if self.has_path(S, D):\n \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n PathConcave_cost = self.max_path_cost(Shortest_path, 'c1') \n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n if path_cost <= L:\n \"\"\"go to concave cost\"\"\"\n PathConcave_cost = self.max_path_cost(Shortest_path, 'c1') \n self.G = self.rm_edge_constraint(PathConcave_cost) # remove all links where the concave link is greater than PathConcave_cost\n \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n PathConcave_cost = 0\n Opt_path = []\n return PathConcave_cost, Opt_path",
"def Option3_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n return path_cost_with_concave_function, Opt_path\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_concave_function = self.calculate_path_cost_with_concave_function(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_concave_function) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_concave_function = 0\n return path_cost_with_concave_function, Opt_path",
"def getBestPath(self):\n if self._bestPathVertex.getNextWaypoint() is None:\n numWaypointsCompleted = len(self._waypoints)\n quality = 2\n if self._vertexQueue.isEmpty():\n quality += 1\n else:\n numWaypointsCompleted = self._bestPathVertex.getNextWaypoint().getIndex()\n quality = 1\n if self._vertexQueue.isEmpty():\n quality -= 1\n \n return outputPath.generatePath(self._bestPathVertex, self._params.waypointAcceptanceRadii, quality, numWaypointsCompleted)",
"def Option2_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n return path_cost_with_weighted_sum, Opt_path\n\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_weighted_sum) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_weighted_sum = 0\n return path_cost_with_weighted_sum, Opt_path",
"def plan_path(self, msg):\n # Request the map\n # In case of error, return an empty path\n mapdata = PathPlanner.request_map()\n\n if mapdata is None:\n return Path()\n # Calculate the C-space and publish it\n cspacedata = self.calc_cspace(mapdata, 3)\n # Execute A*\n start = PathPlanner.world_to_grid(mapdata, msg.start.pose.position)\n goal = PathPlanner.world_to_grid(mapdata, msg.goal.pose.position)\n \n path = self.a_star(cspacedata, start, goal) #, self.c_space_array, self.frontier, self.expanded)\n \n # Optimize waypoints\n waypoints = PathPlanner.optimize_path(path)\n # print waypoints\n waypoints.remove(waypoints[0])\n # print waypoints\n\n self.path_pub.publish(self.path_to_message(cspacedata, waypoints))\n # Return a Path message\n return self.path_to_message(cspacedata, waypoints)",
"def find_best_path(self, paths, sw, util, duration, time_now):\n bestpath = None\n bestpathmetric = None # [0,1] lower means better path\n bestpathlen = None # lower -> better path\n candidatepaths = []\n \n assert len(paths) == 2\n \n path_to_shift, shift_by = self.calculate_what_to_shift(paths, sw)\n\n pathmetrics = {}\n paths_by_length = {}\n metrics = []\n metricpaths = {}\n for path in paths:\n metric, length = self.compute_path_metric(sw, path, 0, 0, local_contrib=True)\n paths_by_length[length] = path\n metrics.append(metric)\n assert metric >= 0 \n pathmetrics[\" \".join(path)] = metric\n metricpaths[metric] = path\n\n logging.debug(\"SS FBP PATH METRICS:, %s\", str(metricpaths))\n if path_to_shift == None:\n # return shortest path\n logging.debug(\"SS FBP Returning LOCAL: %s\", str((paths_by_length[min(paths_by_length.keys())],0)))\n return (paths_by_length[min(paths_by_length.keys())], 0)\n \n \n path_to_shift_metric = pathmetrics.pop(\" \".join(path_to_shift))\n path_to_receive_metric = pathmetrics.pop(pathmetrics.keys()[0])\n logging.debug(\"SS FBP Path to Recv: %s\", str(metricpaths[path_to_receive_metric]))\n\n if (path_to_receive_metric == 0):\n logging.debug(\"SS FBP EARLY Returning : %s\", str((metricpaths[min(metrics)], 0)))\n return (metricpaths[min(metrics)], 0)\n else:\n current_ratio = path_to_shift_metric * 1.0 / path_to_receive_metric\n\n logging.debug(\"SS FBP CURRENT RATIO: %s\", str(current_ratio))\n\n\n goal_path_to_shift_metric = path_to_shift_metric * (1 - (shift_by * self.alpha))\n goal_path_to_receive_metric = path_to_receive_metric + (path_to_shift_metric * (shift_by * self.alpha))\n\n if (goal_path_to_receive_metric == 0):\n # large number for practical purposes\n goal_ratio = 100000\n else:\n goal_ratio = goal_path_to_shift_metric * 1.0 / goal_path_to_receive_metric\n\n logging.debug(\"SS FBP GOAL RATIO: %s\", str(goal_ratio))\n\n # FINALLY DECIDE WHICH PATH TO RETURN BASED ON GOAL-Current RATIO\n if goal_ratio - current_ratio < 0:\n # return path with lower utiliztion\n logging.debug(\"SS FBP LOWER Returning : %s\", str((metricpaths[min(metrics)], 0)))\n return (metricpaths[min(metrics)], 0)\n \n if goal_ratio - current_ratio > 0:\n # return path with higher utilization\n logging.debug(\"SS FBP HIGHER Returning : %s\", str((metricpaths[max(metrics)], 0)))\n return (metricpaths[max(metrics)], 0)\n\n if goal_ratio - current_ratio == 0:\n # return shortest path\n logging.debug(\"SS FBP Returning LOCAL: %s\",\n str((paths_by_length[min(paths_by_length.keys())], 0)))\n return (paths_by_length[min(paths_by_length.keys())], 0)",
"def comm_all_rt_nlris(self, peer):\n # First check if for this peer mpbgp-rtc is valid.\n if not peer.is_mbgp_cap_valid(RF_RTC_UC):\n return\n\n neigh_conf = self._neighbors_conf.get_neighbor_conf(peer.ip_address)\n peer_rtc_as = neigh_conf.rtc_as\n # Iterate over all RT_NLRI destination communicate qualifying RT_NLRIs\n rtc_table = self._table_manager.get_rtc_table()\n for dest in rtc_table.values():\n best_path = dest.best_path\n # Ignore a destination that currently does not have best path\n if not best_path:\n continue\n\n # If this is a local path\n if best_path.source is None:\n # Check RT NLRI's origin AS matches peer RTC_AS setting\n origin_as = best_path.nlri.origin_as\n if origin_as == peer_rtc_as:\n peer.communicate_path(best_path)\n else:\n # Communicate all remote RT NLRIs\n peer.communicate_path(best_path)\n\n # Also communicate EOR as per RFC\n peer.enque_end_of_rib(RF_RTC_UC)",
"def get_shortest_as_path(self, routes):\n outroutes = []\n min_val = float('inf');\n # get shortest AS path first\n for r in routes:\n if len(r[MESG][APTH]) < min_val:\n min_val = len(r[MESG][APTH])\n # find all routes with that val\n for r in routes:\n if len(r[MESG][APTH]) == min_val:\n outroutes.append(r)\n\n return outroutes",
"def optimal_route(graph,homes,source):\n number_of_homes = len(homes)\n all_pairs_distances = dict(nx.shortest_path_length(graph, weight = 'weight'))\n all_pairs_shortest_paths = dict(nx.shortest_path(graph, weight = 'weight'))\n homes_subgraph = tsp_routines.complete_shortest_path_subgraph_efficient(graph,homes,all_pairs_distances)\n num_clusters_to_clustering = clustering_routines.all_k_clusters(homes_subgraph,number_of_homes)\n \n cluster_list = range(1,number_of_homes+1)\n optimal_cost = np.Inf\n optimal_dropoffs = dict()\n optimal_route = []\n optimal_num_clusters = 0\n\n\n for num_clusters in cluster_list:\n home_clusters = num_clusters_to_clustering[num_clusters]\n cost, dropoffs, route = solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths)\n if cost < optimal_cost:\n optimal_cost = cost\n optimal_route = route \n optimal_dropoffs = dropoffs\n optimal_num_clusters = num_clusters\n\n return optimal_cost, optimal_dropoffs, optimal_route, optimal_num_clusters",
"def change_way(coins, opponentLocation, player_location):\n global best_weight, best_path\n dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n coins_to_search = get_n_shortest(5, coins, player_location, dists_matrix)\n ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n for c in coins_to_search:\n if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n coins_to_search.remove(c)\n break\n best_weight = float(\"inf\")\n best_path = []\n api.debug(coins_to_search)\n exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n meta_route = [player_location] + best_path\n api.debug(meta_route)\n route = u.location_list_to_route(meta_route, route_matrix)\n \n return coins_to_search, meta_route, route, dist_matrix[player_location][meta_route[1]]",
"def get_path(self, sim, app_name, message, topology_src, alloc_DES, alloc_module, traffic, from_des):\n node_src = topology_src\n DES_dst = alloc_module[app_name][message.dst]\n\n # print (\"GET PATH\")\n # print (\"\\tNode _ src (id_topology): %i\" % node_src)\n # print (\"\\tRequest service: %s \" % message.dst)\n # print (\"\\tProcess serving that service: %s \" % DES_dst)\n\n bestPath = []\n bestDES = []\n min_path_size = float('inf')\n myDigraph = graph2digraph(sim.topology.G)\n\n for des in DES_dst: ## In this case, there are only one deployment\n dst_node = alloc_DES[des]\n print (\"\\t Looking the path to id_node: %i\" % dst_node)\n\n path = list(nx.shortest_path(myDigraph, source=node_src, target=dst_node, weight='BW'))\n actual_path_size = nx.shortest_path_length(myDigraph, source=node_src, target=dst_node, weight='BW')\n\n # Calculo do fluxo maximo no grafo desde 1 ate 6\n # flow_value, flow_dict = list(nx.maximum_flow(myDigraph, _s=0, _t=6, capacity=\"BW\"))\n # print(\"MAXFLOW VALUE: \", flow_value)\n # print(\"MAXFLOW DICT: \", flow_dict)\n\n print (\"\\t\\t candidate to best path: \" + str(path) + \" Size = \" + str(actual_path_size))\n\n if actual_path_size < min_path_size:\n min_path_size = actual_path_size\n minPath = path\n bestDES = des\n\n bestPath = [minPath]\n bestDES = [des]\n print (\"best path is: \", bestPath, \" with weigth \", min_path_size)\n print \"---------------------------------------------------------\"\n return bestPath, bestDES",
"def get_shortest_as_path(self, routes):\n # filter out any routes that don't have the shortest AS path\n outroutes = routes.copy()\n outroutes.sort(key=lambda r: len(r[MESG][APTH]))\n lowest = len(outroutes[0][MESG][APTH])\n outroutes = list(filter(lambda r: len(r[MESG][APTH]) == lowest, outroutes))\n return outroutes",
"def get_path(self, sim, app_name, message, topology_src, alloc_DES, alloc_module, traffic, from_des):\n node_src = topology_src\n DES_dst = alloc_module[app_name][message.dst] # returns an array with all DES process serving\n\n if message.dst not in self.rr.keys():\n self.rr[message.dst] = 0\n\n # print (\"GET PATH\")\n # print (\"\\tNode _ src (id_topology): %i\" % node_src)\n # print (\"\\tRequest service: %s \" % (message.dst))\n # print (\"\\tProcess serving that service: %s (pos ID: %i)\" % (DES_dst, self.rr[message.dst]))\n\n bestPath = []\n bestDES = []\n\n for ix, des in enumerate(DES_dst):\n if message.name == \"RawVideo\" or message.name == \"IdentifyObject\":\n if self.rr[message.dst] == ix:\n dst_node = alloc_DES[des]\n\n path = list(nx.shortest_path(sim.topology.G, source=node_src, target=dst_node, weight='BW'))\n\n bestPath = [path]\n bestDES = [des]\n\n self.rr[message.dst] = (self.rr[message.dst] + 1) % len(DES_dst)\n break\n else: # message.name == \"M.B\"\n\n dst_node = alloc_DES[des]\n\n path = list(nx.shortest_path(sim.topology.G, source=node_src, target=dst_node, weight='BW'))\n if message.broadcasting:\n bestPath.append(path)\n bestDES.append(des)\n else:\n bestPath = [path]\n bestDES = [des]\n\n return bestPath, bestDES",
"def test_best_path(self):\n\n # Retrieve list of route objects and dictionary of stop objects\n route_objs, stop_objs = interfaceMBTA.create_route_and_stop_data_structures() \n\n start_stop_name = 'Davis'\n end_stop_name = 'Davis'\n best_path = interfaceMBTA.get_best_path(start_stop_name,end_stop_name,stop_objs)\n self.assertEqual(best_path, [])\n\n start_stop_name = 'Davis'\n end_stop_name = 'Kendall/MIT'\n best_path = interfaceMBTA.get_best_path(start_stop_name,end_stop_name,stop_objs)\n self.assertEqual(best_path, ['Red Line'])\n\n start_stop_name = 'Kendall/MIT'\n end_stop_name = 'Davis'\n best_path = interfaceMBTA.get_best_path(start_stop_name,end_stop_name,stop_objs)\n self.assertEqual(best_path, ['Red Line'])\n\n start_stop_name = 'Ashmont'\n end_stop_name = 'Arlington'\n best_path = interfaceMBTA.get_best_path(start_stop_name,end_stop_name,stop_objs)\n self.assertEqual(best_path, ['Red Line','Green Line B'])\n\n start_stop_name = 'Arlington'\n end_stop_name = 'Ashmont'\n best_path = interfaceMBTA.get_best_path(start_stop_name,end_stop_name,stop_objs)\n self.assertEqual(best_path, ['Green Line B','Red Line'])",
"def walk(self, priv_path:list):\n # End conditions for recursive loop\n current_node = priv_path[-1]\n if current_node.location in self.destination and len(priv_path)>1:\n self.addItinerary(priv_path)\n self.n_routes+=1\n return\n if self.n_routes >= self.max_n_routes:\n return\n\n if len(priv_path)>1:\n # Get metadata of last edge type\n last_edge = self.EdgeType(priv_path[-2], priv_path[-1])\n else: # If it's start of itinerary, next edge would be travel edge\n # So, make last edge as stay\n last_edge = 'stay'\n if last_edge == 'stay': # next edge will be travel i.e., ship not None\n next_nodes = [node for node in self.G.neighbors(current_node) \n if self.G.edges[current_node, node]['ship'] is not None]\n else: # Next edge will be stay, i.e., ship = None\n next_nodes = [node for node in self.G.neighbors(current_node)\n if self.G.edges[current_node, node]['ship'] is None]\n \n for node in next_nodes:\n self.walk(priv_path+[node])",
"def calculate_path_cost_with_weighted_sum(self, path, attr1, attr2): \n costs = [] \n for i in range(len(path) - 1):\n a = (1- self.G[path[i]][path[i+1]][attr2]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n b = (1- self.G[path[i]][path[i+1]][attr1]) / (2 - self.G[path[i]][path[i+1]][attr1] - self.G[path[i]][path[i+1]][attr2]) \n costs.append(a * self.G[path[i]][path[i+1]][attr1] + b * self.G[path[i]][path[i+1]][attr2]) \n return max(costs)",
"def additive_path_cost(self, path, attr): \n return sum([self.G[path[i]][path[i+1]][attr] for i in range(len(path)-1)])",
"def enum_high_mass_shortest_paths(G, pool, use_scores=False, use_genes=False, seen_paths=None):\n if seen_paths == None:\n seen_paths = []\n unq_sorted_paths = set([])\n # in case orientation obliv. sorted path strings passed in\n for p in seen_paths:\n unq_sorted_paths.add(p)\n paths = []\n\n nodes = []\n nodes = list(G.nodes()) # creates a copy\n\n logger.info(\"Getting edge weights\")\n\n # use add_edge to assign edge weights to be 1/mass of starting node\n # TODO: only calculate these if they haven't been/need to be updated\n for e in G.edges():\n if use_genes and G.nodes[e[1]]['gene'] == True:\n G.add_edge(e[0], e[1], cost = 0.0)\n elif use_scores==True:\n G.add_edge(e[0], e[1], cost = (1.-(G.nodes[e[1]]['score']))/get_spades_base_mass(G, e[1]))\n else:\n G.add_edge(e[0], e[1], cost = (1./get_spades_base_mass(G, e[1])))\n\n logger.info(\"Getting shortest paths\")\n paths_list = []\n if pool._processes > 1 and pool._processes <= 2*len(nodes): # otherwise, run single threaded\n paths_list=Manager().list()\n pool.map(get_shortest, [[node, G, paths_list] for node in nodes])\n else:\n for node in nodes:\n get_shortest([node,G,paths_list])\n\n for path in paths_list:\n # below: create copy of path with each node as rc version\n # use as unique representation of a path and rc of its whole\n unoriented_sorted_path_str = get_unoriented_sorted_str(path)\n\n # here we avoid considering cyclic rotations of identical paths\n # by sorting their string representations\n # and comparing against the set already stored\n if unoriented_sorted_path_str not in unq_sorted_paths:\n unq_sorted_paths.add(unoriented_sorted_path_str)\n paths.append(tuple(path))\n\n return paths",
"def max_path_cost(self, path, attr): \n return max([self.G[path[i]][path[i+1]][attr] for i in range(len(path)-1)])",
"def graph_search(self, initial_path, goal):\n nodes_considered = 0\n frontier = PathPriorityQueue([Path(initial_path, 0)])\n explored = set()\n while True:\n if frontier.is_empty():\n return \"FAIL\"\n path = frontier.pop()\n node = path.end\n explored.add(node)\n if node == goal:\n return path\n for action in self.ACTIONS(path):\n print(str(action))\n res = action.result(path)\n if res not in explored:\n new_path = path.combine(action)\n frontier.add(new_path)\n nodes_considered+=1",
"def findWayByAStar(start, isFinish, getDistance, getIncidenceList, getHeuristicCostEstimate):\n processed_vertices = set()\n waiting_vertices = {start, }\n\n node_queue = PriorityQueue()\n node_storage = dict()\n\n node_storage[start] = _AStarNode(start)\n node_storage[start].hce = getHeuristicCostEstimate(start)\n node_storage[start].updateSum()\n node_queue.put_nowait(tuple(((node_storage[start].sum, 0), node_storage[start].vertex)))\n\n while len(waiting_vertices) != 0:\n processing_vertex = node_queue.get()[1] # item = ((priority number, priority_index), data).\n while processing_vertex in processed_vertices:\n processing_vertex = node_queue.get_nowait()[1]\n\n if isFinish(processing_vertex):\n return _createPath(processing_vertex, node_storage)\n\n _processVertex(processing_vertex, getDistance, getIncidenceList, getHeuristicCostEstimate,\n processed_vertices, waiting_vertices, node_storage, node_queue)\n\n raise Exception(\"Path doesn't exist\")",
"def solve(\n self,\n initial_routes=None,\n solver=\"cbc\",\n cspy=False,\n exact=True,\n pricing_strategy=\"PrunePaths\",\n ):\n if cspy:\n self.G.graph[\"subproblem\"] = \"cspy\"\n else:\n self.G.graph[\"subproblem\"] = \"lp\"\n print(self.G.graph[\"name\"], self.G.graph[\"subproblem\"])\n print(\"===========\")\n prob = VehicleRoutingProblem(\n self.G,\n duration=self.max_duration,\n load_capacity=self.max_load,\n drop_penalty=self.penalty,\n pickup_delivery=self.activate_pickup_delivery,\n distribution_collection=self.activate_distribution_collection,\n time_windows=self.activate_time_windows,\n )\n prob.solve(\n initial_routes=initial_routes,\n cspy=cspy,\n exact=exact,\n pricing_strategy=pricing_strategy,\n solver=solver,\n )\n self.best_value, self.best_routes = prob.best_value, prob._best_routes_as_graphs\n self.best_routes_nodes = prob.best_routes",
"def add_to_frontier(self, path):\n if self.method == \"astar\":\n value = path.cost + self.problem.heuristic(path.end())\n if self.method == \"best\":\n value = self.problem.heuristic(path.end())\n if self.method == \"least-cost\":\n value = path.cost\n self.frontier.add(path, value)",
"def astar(grid, heuristic):\r\n\r\n print (grid.getStart())\r\n frontier = PriorityQueue()\r\n frontierCpy = {}\r\n\r\n goal = grid.getGoals()[0]\r\n\r\n startX = grid.getStart()[0]\r\n startY = grid.getStart()[1]\r\n startNode = Node(((startX, startY), 0), None)\r\n\r\n init_heu = heuristic(startNode.cell[0], goal)\r\n frontierCpy[startNode.cell[0]] = init_heu\r\n frontier.put((init_heu, 0, startNode))\r\n\r\n while frontier.qsize() != 0:\r\n tup = frontier.get()\r\n\r\n currNode = tup[2]\r\n currG = tup[1] * -1\r\n grid.addVisited(currNode.cell[0])\r\n frontierCpy.pop(currNode.cell[0], None)\r\n\r\n if currNode.cell[0] == goal:\r\n path = []\r\n while currNode != None:\r\n path.insert(0, currNode.cell[0])\r\n currNode = currNode.parent\r\n grid.setPath(path)\r\n return path\r\n\r\n\r\n neighbors = grid.getNeighbors(currNode.cell[0])\r\n\r\n for n in neighbors:\r\n if n[0] not in grid.getVisited():\r\n newNode = Node(n, currNode)\r\n\r\n h = heuristic(n[0], goal)\r\n\r\n oneStepCost = n[1]\r\n g = oneStepCost + currG\r\n if n[0] not in frontierCpy or frontierCpy[n[0]] > h + g:\r\n frontier.put((h+g, -1*g, newNode))\r\n frontierCpy[n[0]] = h+g\r\n print(\"CANT FIND A PATH\")",
"def fastest_path_estimation(sol):\n\n class Path:\n def __init__(self, places, graph):\n self.g = 0 # current cost\n self.graph = graph\n self.visited = [places[0]] # list of already visited attractions\n self.not_visited = copy.deepcopy(places[1:]) # list of attractions not yet visited\n\n def __lt__(self, other):\n return self.g < other.g\n\n def add(self, idx):\n # add the cost\n self.g += self.graph[self.visited[-1], idx]\n # add the to the visited place and remove from the unvisited places\n self.visited.append(idx)\n self.not_visited.remove(idx)\n\n def add_to_heap_queue(path):\n # custom function to add to heap queue sorted by the solution's cost\n heappush(h_queue, path)\n\n if len(sol.not_visited) == 0:\n return 0\n elif len(sol.not_visited) == 1:\n return sol.graph[sol.visited[-1], sol.not_visited[0]]\n\n c = sol.visited[-1]\n pm = sol.not_visited[-1]\n # the heap queue of solution sorted by their cost - change all to tuples with g for dijkstra\n h_queue = []\n\n # the places to use for the graph\n sub_search_places = [c]\n sub_search_places.extend(sol.not_visited)\n\n # push the first \"node\" in the queue\n add_to_heap_queue(Path(sub_search_places, sol.graph))\n while True:\n # take the next solution with the shortest cost\n path = heappop(h_queue)\n # if it contains destination, stop and return that solution\n if pm in path.visited:\n return path.g\n # create a new solution for each neighbor of the current vertex and add it to heap queue\n for place in path.not_visited:\n new_path = copy.deepcopy(path)\n new_path.add(place)\n add_to_heap_queue(new_path)",
"def recommend_pathway(user_jobs, job_graph, goal_state, min_likelihood_thr):\r\n user_jobs_for_mdp = [user_jobs[0]]\r\n mdp = MDP(job_graph, user_jobs_for_mdp, goal_state, min_likelihood_thr=min_likelihood_thr)\r\n return mdp.solve_mdp()",
"def get_best_path(digraph, start, end, path, max_dist_outdoors, best_dist,\n best_path):\n path[0] = path[0] + [start]\n \n if digraph.has_node(Node(start)) == False or digraph.has_node(Node(end)) == False:\n raise ValueError('Start or end node are not valid')\n elif start == end :\n return path\n else:\n for edge in digraph.get_edges_for_node(Node(start)):\n \n node = str(edge.get_destination())\n tot_distance = edge.get_total_distance() + path[1]\n tot_outdoors = edge.get_outdoor_distance() + path[2]\n\n if node not in path[0] and tot_distance <= best_dist and tot_outdoors <= max_dist_outdoors :\n updated_path = [path[0], tot_distance, tot_outdoors]\n new_path = get_best_path(digraph, node, end, updated_path, max_dist_outdoors, best_dist, best_path)\n \n if new_path:\n if not best_dist or new_path[1] < best_dist: \n best_path = new_path[0]\n best_dist = new_path[1]\n\n return best_path, best_dist"
]
| [
"0.7693337",
"0.62346447",
"0.575881",
"0.55601615",
"0.5535651",
"0.5475796",
"0.54713356",
"0.53688544",
"0.5191018",
"0.51828367",
"0.51666784",
"0.5132194",
"0.5109872",
"0.50459236",
"0.5044263",
"0.5031913",
"0.5028703",
"0.50194",
"0.49737486",
"0.4973715",
"0.4969356",
"0.49218374",
"0.48940012",
"0.48830527",
"0.48816735",
"0.4878142",
"0.48663765",
"0.4846292",
"0.48298985",
"0.48206952"
]
| 0.68744844 | 1 |
Collect all peers that qualify for sharing a path with given RTs. | def _collect_peers_of_interest(self, new_best_path):
path_rts = new_best_path.get_rts()
qualified_peers = set(self._peers.values())
# Filter out peers based on RTC_AS setting if path is for RT_NLRI
qualified_peers = self._rt_manager.filter_by_origin_as(
new_best_path, qualified_peers
)
# We continue to filter out qualified peer based on path RTs
# If new best path has RTs, we need to share this UPDATE with
# qualifying peers
if path_rts:
# We add Default_RTC_NLRI to path RTs so that we can send it to
# peers that have expressed interest in all paths
path_rts.append(RouteTargetMembershipNLRI.DEFAULT_RT)
# All peers that do not have RTC capability qualify
qualified_peers = set(self._get_non_rtc_peers())
# Peers that have RTC capability and have common RT with the path
# also qualify
peer_to_rtfilter_map = self._peer_to_rtfilter_map
for peer, rt_filter in peer_to_rtfilter_map.items():
# Ignore Network Controller (its not a BGP peer)
if peer is None:
continue
if rt_filter is None:
qualified_peers.add(peer)
elif rt_filter.intersection(path_rts):
qualified_peers.add(peer)
return qualified_peers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def comm_all_best_paths(self, peer):\n LOG.debug('Communicating current best path for all afi/safi except'\n ' 1/132')\n # We will enqueue best path from all global destination.\n for route_family, table in self._table_manager.iter:\n if route_family == RF_RTC_UC:\n continue\n if peer.is_mbgp_cap_valid(route_family):\n for dest in table.values():\n if dest.best_path:\n peer.communicate_path(dest.best_path)",
"def R1(self, i):\n results = []\n for peer in self.router:\n remotes_peers = self.get(peer)\n for friend_of_a_friend in remotes_peers:\n if friend_of_a_friend['node'] == i.threeple and friend_of_a_friend['transactions']:\n results.append(peer)\n log(\"R1: %s %s\" % (i, str(results)))\n return results",
"def get_all_resources(self) -> typing.List:\n\n session = self.session()\n\n try:\n available_peers = session\\\n .query(\n ResourceTable.peerIp,\n ResourceTable.peerPort,\n ResourceTable.resourcePath,\n ResourceTable.resourceName,\n ResourceTable.resourceHash\n )\\\n .group_by(ResourceTable.peerId, ResourceTable.resourceHash)\\\n .all()\n\n return available_peers\n\n finally:\n session.close()",
"def common_peers(self, i, j):\n ir = self.get(i, self.router.network)\n jr = self.get(j, self.router.network)\n \n if not ir or not jr:\n return []\n\n ir = [tuple(p['node']) for p in ir if p['transactions']]\n jr = [tuple(p['node']) for p in jr if p['transactions']]\n\n result = list(set(ir).intersection(jr))\n log(\"cmn: %s %s %i: %s\" % (i, j, len(result), result))\n return result",
"def sources(self):\n if self.rank < self.midpoint:\n partner = self.midpoint + (self.rank - self.left)\n if self.rank == self.midpoint - 1 and partner == self.right:\n partners = set()\n elif self.rank == self.midpoint - 1 and partner == self.right - 2:\n partners = {partner, partner + 1}\n else:\n partners = {partner}\n else:\n partner = self.left + (self.rank - self.midpoint)\n if self.rank == self.right - 1 and partner == self.midpoint:\n partners = set()\n elif self.rank == self.right - 1 and partner == self.midpoint - 2:\n partners = {partner, partner + 1}\n else:\n partners = {partner}\n\n return partners",
"def get_all_paths(self):\n seen = set()\n for v in self:\n # v in self returns all nodes in the pathgraph\n if v not in seen:\n # self [v] returns a path containing v. If the v does not belong to a path\n # a singleton path [v] is returned\n yield self[v]\n seen.update(self[v])",
"def find_relevant_relation_ids(self, rids):\n if isinstance(rids, int):\n rids = [rids]\n found = set()\n for x in rids:\n found.update(self.find_relations(x))\n return found",
"def get_peers(self):\n self.peers = []\n retriever_methods = [\n m\n for m in rtorrent9.peer.methods\n if m.is_retriever() and m.is_available(self._rt_obj)\n ]\n # need to leave 2nd arg empty (dunno why)\n m = rtorrent9.rpc.Multicall(self)\n m.add(\n \"p.multicall\",\n self.info_hash,\n \"\",\n *[method.rpc_call + \"=\" for method in retriever_methods]\n )\n\n results = m.call()[0] # only sent one call, only need first result\n\n for result in results:\n results_dict = {}\n # build results_dict\n for m, r in zip(retriever_methods, result):\n results_dict[m.varname] = rtorrent9.rpc.process_result(m, r)\n\n self.peers.append(Peer(self._rt_obj, self.info_hash, **results_dict))\n\n return self.peers",
"def filter_relationships(self, recRelation, routes, src, is_forward=False, is_update=False):\n dict_entry = \"src\" if is_update else \"peer\"\n outroutes = []\n if recRelation == CUST:\n if is_forward:\n return routes\n\n for val in routes:\n if val[dict_entry] != src: \n outroutes.append(val) \n\n return outroutes\n for val in routes: \n ip = val[dict_entry]\n relation = self.relations[ip] \n if relation == CUST: \n outroutes.append(val)\n \n return outroutes",
"def comm_all_rt_nlris(self, peer):\n # First check if for this peer mpbgp-rtc is valid.\n if not peer.is_mbgp_cap_valid(RF_RTC_UC):\n return\n\n neigh_conf = self._neighbors_conf.get_neighbor_conf(peer.ip_address)\n peer_rtc_as = neigh_conf.rtc_as\n # Iterate over all RT_NLRI destination communicate qualifying RT_NLRIs\n rtc_table = self._table_manager.get_rtc_table()\n for dest in rtc_table.values():\n best_path = dest.best_path\n # Ignore a destination that currently does not have best path\n if not best_path:\n continue\n\n # If this is a local path\n if best_path.source is None:\n # Check RT NLRI's origin AS matches peer RTC_AS setting\n origin_as = best_path.nlri.origin_as\n if origin_as == peer_rtc_as:\n peer.communicate_path(best_path)\n else:\n # Communicate all remote RT NLRIs\n peer.communicate_path(best_path)\n\n # Also communicate EOR as per RFC\n peer.enque_end_of_rib(RF_RTC_UC)",
"def find_paths(self, source, destination, closed=None):\n if closed is None:\n closed = set()\n closed.add(source)\n links = {x.trusted for x in self._tau\n if x.truster == source and x.trusted not in closed}\n if len(links) == 0: # base\n return []\n if destination in links: # base\n return [[Trust(source, destination)]]\n # recurse\n retval = []\n for link in links:\n linkpaths = self.find_paths(link, destination, closed)\n for path in linkpaths:\n path.insert(0, Trust(source, link))\n retval += linkpaths\n\n for path in retval:\n if None in path:\n retval.remove(path)\n if len(retval) == 0:\n return []\n return retval",
"def get_all_social_paths(self, user_id):\n if len(self.friendships) > 0:\n visited = {}\n q = Queue()\n q.enqueue([user_id])\n\n while q.size() > 0:\n curr_path = q.dequeue()\n curr_vertex = curr_path[-1]\n\n if curr_vertex not in visited:\n visited[curr_vertex] = curr_path\n\n for friend in self.friendships[curr_vertex]:\n path_copy = curr_path[:]\n path_copy.append(friend)\n q.enqueue(path_copy)\n\n return visited\n\n else:\n print(\"There are currently no friendship paths in the network\")",
"def discover_peers():\n # TODO: Disable this function if peer discoverability is disabled in config\n\n peer_manager = load_plugin(\"chain.plugins.peers\")\n peers = peer_manager.peers()\n # Shuffle peers so we always get the peers from the different peers at the start\n random.shuffle(peers)\n for index, peer in enumerate(peers):\n his_peers = peer.fetch_peers()\n for his_peer in his_peers:\n add_peer(\n ip=his_peer.ip,\n port=his_peer.port,\n chain_version=his_peer.chain_version,\n nethash=his_peer.nethash,\n os=his_peer.os,\n )\n\n # Always get peers from at least 4 sources. As add_peer is async,\n # `has_minimum_peers` might actually return wrong result, but that will only\n # increase the number of peers we have.\n if index >= 4 and peer_manager.has_minimum_peers():\n break\n\n reverify_all_peers()",
"def get_next(paths):\r\n next_paths = []\r\n for path in paths:\r\n last_sq = path[len(path) - 1]\r\n for peer in find_peers(last_sq):\r\n next_path = path + [peer]\r\n next_paths.append(next_path)\r\n # cull out paths with duplicates\r\n return [path for path in next_paths if has_no_repeats(path)]",
"def __request_virdir(self):\n for pn in self.peernames:\n with socket.socket() as tmpsock:\n tmpsock.connect(tuple(pn))\n\n # Solicitud\n tmpsock.send(message.REQDIRMSG)\n header = tmpsock.recv(5)\n\n if header[0] != message.GIVEDIR:\n raise ValueError(\n \"[SHARE] error al solicitar archivos a\", pn)\n\n # Primero se le piden sus archivos\n bodysize = int.from_bytes(header[1:5], byteorder=\"big\")\n body = tmpsock.recv(bodysize)\n port, sharelist = message.parse_file_bytes(body, bodysize)\n self.__add_sharefiles(sharelist, pn)\n\n # Despues le comparte los suyos\n tmpsock.send(message.build_givedir_message(\n self.port, self.files))",
"def find_parents(self) -> None:\n self.referers: Dict[str, List[Har2Tree]] = defaultdict(list)\n for hartree in self.hartrees:\n if hartree.root_referer:\n self.referers[hartree.root_referer].append(hartree)",
"def filter_relationships(self, srcip, routes):\n outroutes = []\n return outroutes",
"def all_paths(self, node, destination, dist, path):\n\n d=self.dict()\n p=[]\n for i in range(len(path)):\n p.append(path[i])\n p.insert(len(p),node)\n \n if len(p)-1==dist:\n if node==destination:\n return p\n else:\n return None\n\n my_paths=[]\n\n for a in d[node]:\n if a not in p:\n p1=self.all_paths(a,destination,dist,p)\n\n if p1!=None:\n if isinstance(p1[0],list):\n for i in range(len(p1)):\n my_paths.append(p1[i])\n else:\n my_paths.append(p1)\n\n if len(my_paths)!=0:\n return my_paths\n else:\n return None",
"def unison_sync(paths_to_sync):\n log('Synchronizing CA (%s) to all peers.' % (', '.join(paths_to_sync)),\n level=INFO)\n keystone_gid = grp.getgrnam('keystone').gr_gid\n\n # NOTE(dosaboy): This will sync to all peers who have already provided\n # their ssh keys. If any existing peers have not provided their keys yet,\n # they will be silently ignored.\n unison.sync_to_peers(peer_interface='cluster', paths=paths_to_sync,\n user=SSH_USER, verbose=True, gid=keystone_gid,\n fatal=True)\n\n synced_units = peer_units()\n if len(unison.collect_authed_hosts('cluster')) != len(synced_units):\n log(\"Not all peer units synced due to missing public keys\", level=INFO)\n return None\n else:\n return synced_units",
"def aggregate_trust(self):\n AC = []\n peers = [peer for peer in self.router]\n x = len(peers)\n if x / 5:\n x = x / 5\n elif x / 2:\n x = x / 2\n for i in range(x):\n AC.append(peers[i:i+x])\n return AC",
"async def peers() -> dict:\n ips = [peer.ip for peer in chain.peers]\n return {\"peers\": ips}",
"def traverse_uris(uri):\n seen = set()\n uris_to_check = [uri]\n while len(uris_to_check) > 0: \n uri = uris_to_check.pop()\n if uri not in seen:\n seen.add(uri)\n for key in keys_for_uri[uri]:\n for uri2 in uris_for_key[key]:\n if uri2 not in seen:\n uris_to_check.append(uri2)\n \n return seen",
"def get_all_social_paths(self, user_id):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n # graphs=Graph()\n # for i in self.users:\n # graphs.add_vertex(i)\n \n # for i in self.users:\n # for x in self.friendships[i]:\n # graphs.add_edge(i,x)\n\n # for i in graphs.vertices:\n # if graphs.bfs(i,user_id):\n # visited[i]=graphs.bfs(i,user_id)\n queue=Queue()\n queue.enqueue([user_id])\n while queue.size()>0:\n path=queue.dequeue()\n current_user = path[-1]\n if current_user not in visited:\n visited[current_user]=path\n for ID in self.friendships[current_user]:\n new_path=list(path)\n new_path.append(ID)\n queue.enqueue(new_path)\n return visited",
"def filter_relationships(self, srcif, routes):\n outroutes = []\n rel = self.relations[srcif]\n for route in routes:\n opp_rel = self.relations[route[PEER]]\n if (rel == CUST or opp_rel == CUST) or (rel == PROV and opp_rel == PROV):\n outroutes.append(route)\n return outroutes",
"def peer_list_all(self):\n return self.client.call('GET', self.name + 'peer-list/all')",
"async def get_routes(self) -> Sequence[str]:\n results = []\n storage: BaseStorage = await self._context.inject(BaseStorage)\n async for record in storage.search_records(\n self.RECORD_TYPE, {\"to\": self._sender_verkey}\n ):\n results.append(record.value)\n return results",
"def possible(self):\n return [tuple(path) for path in nx.all_shortest_paths(self._gpm.Graph, source=self.source, target=self.target)]",
"def get_matching(self):\n verts, plaqs, d_verts, d_plaqs = self.get_stabs()\n\n # def get_matching(anyons, d_anyons):\n # edges = self.get_edges(anyons)\n # for i0, i1, weight in edges:\n # nxgraph.add_edge(i0, i1, weight=-weight)\n # output = nx.algorithms.matching.max_weight_matching(nxgraph, maxcardinality=True)\n # return [[d_anyons[i0], d_anyons[i1]] for i0, i1 in output]\n\n def get_matching(anyons, d_anyons):\n output = pm.getMatching(len(anyons), self.get_edges(anyons))\n return [[d_anyons[i0], d_anyons[i1], anyons[i0], anyons[i1]] for i0, i1 in enumerate(output) if i0 > i1]\n\n self.matching = []\n if verts:\n self.matching += get_matching(verts, d_verts)\n if plaqs:\n self.matching += get_matching(plaqs, d_plaqs)",
"def get_all_social_paths(self, user_id):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n # BFTs starting at user_id, return first path to every reachable person\n q = [[user_id]]\n while q:\n path = q.pop(0)\n person = path[-1]\n # add the person and the path to the person\n for friend in self.friendships[person]:\n if friend not in visited and friend != user_id:\n q.append(path + [friend])\n visited[friend] = path + [friend]\n\n return visited",
"def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths"
]
| [
"0.5596137",
"0.5572386",
"0.5414871",
"0.53814036",
"0.531134",
"0.52629113",
"0.5244464",
"0.52352923",
"0.5208997",
"0.51961887",
"0.5179847",
"0.5147802",
"0.5136036",
"0.51277256",
"0.5104832",
"0.50553757",
"0.5037936",
"0.50375175",
"0.5020352",
"0.50006336",
"0.5000496",
"0.49870402",
"0.49801764",
"0.49758235",
"0.4968888",
"0.49614814",
"0.49501485",
"0.49478474",
"0.49468747",
"0.49188098"
]
| 0.6937274 | 0 |
Return whether or not user_input is valid. | def validate_user_input(user_input):
responses = ['t', 'r', 'q']
return user_input in responses | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isInputValid(self, input):\r\n pass",
"def validate_input(user_input: str) -> bool:\n\n if not user_input.islower():\n return False\n\n if user_input.endswith(\"yeet\"):\n return False\n \n if \"q\" or \"Q\" in user_input: # Check if q is a letter\n return False\n \n return True # If none of the conditions above are met",
"def input_validation(input_: str) -> bool:\n return fullmatch('[1-9]', input_) is not None",
"def continue_playing_validation():\r\n user_input_is_valid = False\r\n while user_input_is_valid == False:\r\n user_wants_another_problem = input(\"Would you like another problem, Y/N? \").lower()\r\n if user_wants_another_problem in [\"y\", \"n\"]:\r\n user_input_is_valid = True\r\n elif user_wants_another_problem not in [\"y\", \"n\"]:\r\n user_input_is_valid = False\r\n print(f\"The input you entered, '{user_wants_another_problem}', is not valid. Try again.\\n\")\r\n return user_wants_another_problem",
"def has_input(self, name: str) -> bool:\n return self.get_input_type(name) != IN_INVALID",
"def validate(self):\n return (self.check_input_digits_count()\n and self.check_if_input_is_int()\n and self.check_if_input_digits_are_unique())",
"def is_valid_player(user_input):\n \n i = user_input.upper()\n if i in Board.player_decoder:\n return True\n elif i == 'Q':\n exit(\"\\nExiting program. Thanks for using Clue Detective!\\n\")\n else:\n return False",
"def is_char(user_input):\n # Check lenght of input and if equal to zero return True\n if len(user_input) == 0:\n return True\n return False",
"def _is_user_wants_to_continue(self):\n\n # dummy value to get in while\n user_input = -1\n while user_input != 1 and user_input != 2:\n\n try:\n # convert the string into int\n user_input = int(input())\n except ValueError:\n print(\"Please enter a number\")\n continue\n except Exception as e:\n print(\"something went wrong please try again \" + str(e))\n continue\n\n # check if the user_input was one of the options\n # if not present a error massage and try again\n if user_input != 1 and user_input != 2:\n print(\"Please enter a valid number(1-2)\")\n continue\n\n return user_input == 1",
"def is_valid_input(guess_letter):\r\n length = len(guess_letter)\r\n\r\n if length > 1 and not guess_letter.isalpha():\r\n return False\r\n elif not guess_letter.isalpha():\r\n return False\r\n elif length > 1:\r\n return False\r\n else:\r\n return True",
"def answer_input_validation():\r\n user_input_is_valid = False\r\n while user_input_is_valid == False:\r\n try:\r\n users_answer_to_problem = int(input(\"\\nEnter the answer to the addition problem: \"))\r\n user_input_is_valid = True\r\n break\r\n # The ValueError is used because the user must enter an integer. If the \r\n # answer given is not an integer, they are scolded and reprompted.\r\n except ValueError:\r\n user_input_is_valid = False\r\n print(\"That is not an integer. Please enter an appropriate answer.\")\r\n return users_answer_to_problem",
"def CheckNumber(userInput):\n try:\n float(userInput)\n return True\n except(ValueError):\n return False",
"def check_input(input_string):\n if len(input_string) > 50: # check if length of name is less than 50 ir not\n return False\n else:\n return bool(re.match('[a-zA-Z\\s]+$', input_string)) # check is input contains only chars and spaces",
"def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return False\n\n else:\n return True",
"def check_inputs(acceptable, my_s):\n if my_s not in acceptable:\n print(\"Invalid response. Please enter {}\".format(acceptable))\n return my_s in acceptable",
"def verify_valid_num(self, user_num):\r\n if not self.range_between_0_and_9(user_num):\r\n print(\"\\033[1;31mJust what do you think you're doing, Dave? Choose a number between 0 and 8\\033[0m\")\r\n return False\r\n\r\n return True",
"def is_valid(name):\n return bool(name)",
"def valid_response(prompt, *valid):\r\n ans = console_input(prompt).lower()\r\n\r\n if ans in valid:\r\n return True\r\n elif ans == '':\r\n return None\r\n\r\n return False",
"def validate_input(self):\n if (self.options.mexURL and self.options.token): #run module through engine service\n return True\n\n if (self.options.user and self.options.pwd and self.options.root): #run module locally (note: to test module)\n return True\n\n log.debug('Dream3D: Insufficient options or arguments to start this module')\n return False",
"def is_digit(user_input):\n # If any characters is digit return boolean True else False\n if any(char.isdigit() for char in user_input):\n return True\n return False",
"def first_is_valid(command_from_user):\n arguement_entered_user = command_from_user[0]\n if arguement_entered_user == 'list':\n return True\n \n elif arguement_entered_user == 'clashes':\n return True\n \n else:\n return False",
"def check_validity(the_guess, valid_characters, user_guesses):\n\tvalidity = True\n\tif valid_characters.find(the_guess) < 0 or len(the_guess) != 1 or the_guess in user_guesses:\n\t\tvalidity = False\n\treturn validity",
"def checkInput(userInput):\n if userInput == 'exit':\n return 0\n return 1",
"def __check_validation(input_string):\n if not input_string:\n raise NullInputException(\"Input string should be not empty\")\n if type(input_string) != str:\n raise NonStringInputException(\"Input value should be a string\")\n if len(input_string) >= 200:\n raise TooLongInputException(\"Input string should be less than 200 characters\")\n for i in input_string:\n if not i.isalpha():\n raise NonStringInputException(\"All input value characters should be an alpha\")",
"def check_input(the_user_entry):\n try:\n for z in range(length_of_bad_input):\n if bad_input[z] == the_user_entry:\n messagebox.showwarning(title=\"Invalid input!\",\n message=\"The following characters are forbidden:\\n\"\n \"~`!@#$%^&*()_-+={[}]|\\\\:;\\\"\\'<,>.?/1234567890\")\n clear_box()\n raise ValueError\n except ValueError:\n print(\"The user entered an invalid character in the entry box\\n\"\n \"potentially one of the following:\\n\"\n \"~`!@#$%^&*()_-+={[}]|\\\\:;\\\"\\'<,>.?/1234567890\")",
"def prompt_bool_input(prompt_name: str, get_user_input: GetInputFunc) -> bool:\n answer_map = {\"yes\": 1, \"no\": 0, \"y\": 1, \"n\": 0}\n try:\n answer = str(get_user_input(f\"{prompt_name} Type in yes or no:\"))\n return bool(answer_map[answer])\n except (ValueError, IndexError) as e:\n raise InvalidInput(str(e))",
"def check_input(saved_input):\n if saved_input.lower() == \"!yes\":\n return True\n if saved_input.lower() == \"!no\":\n return False",
"def validate_user_response(self):\n is_response_valid = False\n while is_response_valid is False:\n response = self.ask_user_input(\"Please, enter a valid option or command\")\n if response in self.options.keys():\n is_response_valid = True\n self.current_response = response\n else:\n print(\"Invalid option/command, please try again\")\n return is_response_valid",
"def is_input_valid(char):\n\n # is there a char at all?\n if char is None:\n return False\n\n # check for embedded 0 byte\n if char == \"\\0\":\n return False\n\n return True",
"def prompt_user_check_input(self):\r\n user_input = 0\r\n # grabs user input and changes it to an int\r\n while True:\r\n try:\r\n user_input = int(\r\n input(\"\\033[1;33mMake your move by entering the number of an open space on the board: \\033[0m\"))\r\n except ValueError:\r\n print(\"Why do you refuse to enter a number, Dave?\")\r\n continue\r\n else:\r\n break\r\n\r\n # makes sure the user enters a number 0-8 and verifies that the space the user selected is open\r\n if self.verify_valid_num(user_input) and self.write_user_choice(user_input):\r\n return True\r\n else:\r\n self.prompt_user_check_input()"
]
| [
"0.7979359",
"0.7343028",
"0.6997487",
"0.67979217",
"0.6788595",
"0.6761745",
"0.67489755",
"0.66954297",
"0.6671437",
"0.6651406",
"0.6639176",
"0.6602737",
"0.6528035",
"0.64938813",
"0.6484191",
"0.6462842",
"0.6445494",
"0.64428204",
"0.6433951",
"0.6424168",
"0.6419654",
"0.63654095",
"0.63535875",
"0.63423777",
"0.6325793",
"0.6315953",
"0.6303789",
"0.6290558",
"0.6280011",
"0.6201787"
]
| 0.80375767 | 0 |
Add donor and donation amount to donor dictionary. | def add_donation(donor, donation_amount, donor_dict):
donor_dict.setdefault(donor, []).append(donation_amount) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_donation(self):\n name = self.get_donor()\n # Update existing\n if name in self.all_donors:\n input_donation = int(input(\"Please enter a donation: \"))\n donations = int(self.r.hget(name, 'donations'))\n donations += input_donation\n\n self.r.hset(name, 'donations', str(donations))\n print(self.r.hget(name, 'donations'))\n # Create new\n else:\n print(\"New donor found. Please enter the following information: \")\n input_donation = str(input(\"Please enter a donation: \")) # should add value error checking by going from str to int to str\n input_email = input(\"Please enter an email: \")\n input_city = input(\"Please enter a city: \")\n input_state = input(\"Please enter a state: \")\n input_zip = input(\"Please enter the zipcode: \")\n self.r.hmset(name, {'donations': input_donation, 'email':input_email, 'city': input_city, 'state': input_state, 'zip': input_zip})",
"def create_donation(fullname, amount):\n donors[fullname].append(amount)",
"def add_donation(self, donation_amt):\n\n self._donations.append(donation_amt)\n self._total_donation += donation_amt",
"def donation_totals(donor_list, donor):\n return sum(donor_list[donor])",
"def add_donation(self, amount):\n self.donations.append(amount)",
"def update_donor(self, name, amount):\n # Create donor object if it doesn't exist, otherwise retrieve from collection\n d = self.get_donor(name)\n # Update donor object and replace in collection\n d.add_donation(amount)\n self.donors[name] = d",
"def add_donation(donor_name, donation):\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n new_donation = Donations.create(\n donation_date=datetime.today(),\n donation_amount=decimal.Decimal(donation),\n donated_by=donor_name\n )\n new_donation.save()\n logger.info(f'Saving {donor_name} - {donation}')\n except Exception as ex:\n logger.info(f'Error creating {new_donation[donation]} for {new_donation.donated_by.firstname}'\n + f' {new_donation.donated_by.lastname}')\n logger.info(ex)\n\n finally:\n database.close()",
"def addDonation(self, amount):\n self.donationList.append(amount)",
"def add_donation_to_donor(self, name, donation):\n self.get_donor_by_name(name).add_donations(donation)",
"def add_donor():\n logger.info('+++ Adding/Updating Donors')\n Main.connect_db()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n name = input(\"Type donor first and last name: \")\n location = input(\"Type donor location (optional): \")\n try:\n don = float(input(\" Donation in USD: \"))\n except ValueError:\n print(\"\"\"\n Donation must be in USD...\n Donor not added\n \"\"\")\n return()\n logger.info(\"Checking if donor {} is in Database\".format(name))\n try:\n with database.transaction():\n try:\n person = Donor.get(\n Donor.donor_name == name\n )\n except Exception as e:\n logger.info(\"Adding new donor {} from {} to Database\".format(name, location))\n new_donor = Donor.create(\n donor_name = name,\n donor_location = location\n )\n new_donor.save()\n else:\n logger.info(\"Donor {} is in Database, only donation will be added\".format(name))\n except Exception as e:\n logger.info(f'Exception cought when adding Donor!')\n logger.info(\"Exception: {}\".format(e))\n finally:\n try:\n with database.transaction():\n new_donation = Donation.create(\n donor = name,\n amount = don\n )\n new_donation.save()\n except Exception as e:\n logger.info(f'Exception cought when adding Donation!')\n logger.info(\"Exception: {}\".format(e))\n logger.info('Closing database...')\n database.close()\n\n Main.print_greetings(Main.greetings(name, don))",
"def add_donor(self, new_donor: Donor):\n self.donor_list.append(new_donor)",
"def add_donation(name, amount, donor_bool):\n if donor_bool is False:\n donor_chart.get(list_names.index(name), [1]).append(amount)\n else:\n donor_chart.update({name: [amount]})\n return",
"def add_donor_object(self, donor_object):\n self.donors.append(donor_object)",
"def add_donor(self, name, donation=None):\n if \",\" in name: # Remove donor name commas to preserve CSV save\n print(\"Warning: Comma will be removed from name\")\n name = name.replace(\",\", \"\")\n if name in self.donor_names:\n raise NameError(f\"Can't add {name} to database.\\\n Donor already exists\")\n else:\n self.donors.append(Donor(name=name, donation=donation))",
"def __init__(self, donors=None):\n if donors is None:\n self.donor_info = {}\n else:\n for donor in donors:\n self.donor_info.update({donor.name: donor})",
"def add_donations():\n done = False\n while not done:\n name = input(\"Enter donor name (or \\\"list\\\" for list): \")\n if name == \"list\":\n # list donor names\n for d in donor_history: print(d.name)\n continue\n for thisdonor in donor_history:\n if name == thisdonor.name:\n break\n if thisdonor == None:\n thisdonor = donor(name)\n donor_history.append(thisdonor)\n print(\"Adding new donor: \" + name)\n moredonations = True\n while moredonations:\n value = input(\"Enter donation amount or -1 when finished: \")\n try:\n donation_amount = int(value)\n except ValueError:\n print(\"Invalid input, reenter.\")\n continue\n if donation_amount == -1: break\n thisdonor.donations.append(donation_amount)\n done = True\n if thisdonor: print(f\"Thank you, {name}, for your donation(s)!\")\n print()\n return",
"def addnewdonordonation(name):\n donor_db[name] = []",
"def total_donations(self):\n return sum(self.donations)",
"def total_donations(self):\n return sum(self.donations)",
"def __init__(self, donors=None):\n if donors is None:\n self.donor_inf = {}\n else:\n self.donor_inf = {name: donations}",
"def __init__(self):\n self.donors = {}",
"def sumdbkey(donorlist):\n return sum(donorlist[1])",
"def __init__(self, donor_name, donation):\n if isinstance(donor_name, str):\n self._full_name = donor_name\n else:\n raise TypeError(\"Donor Name must be type str\")\n\n if isinstance(donation, float):\n self._donations = [donation]\n else:\n raise TypeError(\"Donation must be of type float\")\n self._total_donation = donation",
"def test_add_donation():\n mailroom.add_donation(\"William Gates, III\", 100.0)\n assert mailroom.donor_db[\"William Gates, III\"] == [653772.32, 12.17, 100.0]",
"def adddonation(donorname, donationammount):\n try:\n donor_db[donorname].append(float(donationammount))\n except ValueError:\n print(\"Error: Please enter a numeric dollar amount.\\n\")\n return False\n else:\n return True",
"def donate(self):\n\n # Get item\n import converter\n self.hero.inventory_menu()\n item = prompt(\"Select a weapon, shield or armor to donate. Or \\\npress enter to exit. \").lower()\n item = converter.convert(item)\n\n # If item is a weapon, shield or armor, accept the donation\n if isinstance(item, items.Weapon) or isinstance(item, items.Shield) or isinstance(item, items.Armor):\n if item in self.hero.inventory:\n self.donations.append(item)\n self.hero.drop(item)\n self.sort_donations()\n prompt(\"\\\"Thank you for your donation.\\\"\")\n else:\n prompt(\"You don't have one!\")\n\n # If item is a real item but is not in the above classes, do not accept.\n elif item != False:\n prompt(\"That type of item is not needed.\")",
"def add_donations(self, donations):\n if not isinstance(donations, (list, tuple)):\n donations = [donations] # Wrap non-lists as lists to use same code\n # as for lists/tuples.\n\n for donation in donations:\n if not donation: # if passed an empty string or list just continue\n continue\n try:\n donation_float = float(donation)\n except ValueError:\n raise ValueError(f\"Could not convert to float: {donation}\")\n\n if donation_float < 0:\n raise ValueError(f\"Invalid donation, must be greater than or\\\n equal to 0 {donation_float}\")\n else:\n self.donations.append(donation_float)",
"def add_donation_neo(self, email, donation_amount):\n\t\twith self.driver.session() as session:\n\t\t\ttry:\n\t\t\t\tcyph = \"\"\"\n\t\t\t\tMATCH (d1:Donor {email: '%s'})\n\t\t\t\tCREATE (d1)-[donate:DONATION]->({donation_amount: '%s'})\n\t\t\t\tRETURN d1\n\t\t\t\t\"\"\" % (email, donation_amount)\n\t\t\t\tsession.run(cyph)\n\t\t\texcept Exception as e:\n\t\t\t\tprint(f'Error adding donation to: {email}')\n\t\t\t\tprint(e)",
"def send_thank_you():\n full_name = input(\"Enter the donor's full name > \")\n \n donors = donor_names()\n \n if full_name == 'list':\n #Print the donor names and restart the function\n for name in donors:\n print(name)\n \n send_thank_you()\n \n amount = float( input(\"Enter the donation amount > \") )\n \n for k, v in donor_db.items():\n if k == full_name:\n donor_db[full_name] = v.append(amount)\n break\n else:\n donor_db[full_name] = amount \n\n print( thank_you_letter(thanks_dict = {full_name: amount})[full_name] )",
"def create_donor(fullname):\n donors[fullname] = []"
]
| [
"0.7508495",
"0.70032024",
"0.68972236",
"0.6864651",
"0.6788043",
"0.6714619",
"0.6544379",
"0.6538843",
"0.65385973",
"0.6519153",
"0.64578503",
"0.6412785",
"0.63529754",
"0.6223426",
"0.60587966",
"0.5997739",
"0.59638566",
"0.5864353",
"0.5864353",
"0.5851722",
"0.5806147",
"0.576911",
"0.5717156",
"0.5704965",
"0.56883264",
"0.5665537",
"0.5656079",
"0.5636952",
"0.5627931",
"0.5625472"
]
| 0.8338865 | 0 |
Print email with donor name and donation amount. | def display_email(donor, donation_amount):
print("\nDear {},\n Thank you for your generous donation of ${}. "
"We here at the Seattle Toaster Enthusiasts Association "
"will use the money to fund our annual pilgrimage to "
"Stanley North Dakota, the toaster mecca of the midwest.\n"
"Thank you very much,\n Margie Plumwhistle, President S.T.E.A\n"
"".format(donor, donation_amount)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_email(self):\n email_dict = {'donor_name':self.name,\n 'donation_amount':self.last_donation(),\n 'total_amount':self.total_donations()}\n\n # Create formatted email that can be copied & pasted\n email = ('\\n'.join(['Dear {donor_name},','',\n 'Thank you for your generous donation of ${donation_amount:.2f}.',\n 'To date, you have donated a total of ${total_amount:.2f} to our charity.',\n 'Your contributions help new arrivals receive the highest quality care possible.',\n 'Please know that your donations make a world of difference!',\n '','Sincerely,','The Good Place Team'])).format(**email_dict)\n\n return(email)",
"def send_email(name, amount):\n print(\"Thank you, {}, for your donation of ${}!\"\n \"Your contributions are always welcome and look forward to future donations.\".format(name, amount))",
"def get_donor_email(self):\n input_name = self.get_donor()\n if input_name in self.all_donors:\n print(self.r.hget(input_name, 'email'))",
"def compose_email(self, donation_id=-1, write_totals=False):\n if not write_totals:\n amount = self.donations[donation_id]\n else:\n amount = self.total_donations\n email_string = f\"\\nDear {self.name},\\n Thank you for your generous\\\n gift of ${amount:.2f}! It will help Local Charity achieve our mission.\\n\\\n Best regards,\\n\\\n Local Charity\\n\\n\"\n return email_string",
"def produce_message_for_sending() -> str:\n return f\"You can donate your money here:\\n`{card_donations}`\"",
"def send_donor_email(donor):\n msg = Message('New Donor ID', \n sender='[email protected]', \n recipients=[donor.email])\n msg.body = f\"\"\"Thank you, {donor.first_name} for choosing to donate blood!\n Your Donor ID is {donor.id}\n We appreciate your blood!\"\"\"\n try:\n mail.send(msg)\n except Exception as e:\n print(e)",
"def print_thank_you_total(donor):\n # donor comes in with last donation date , last_donation and fullname\n # pull donor total form global total list -\n for d in donor_totals_list:\n if d.fullname == donor.fullname:\n donation_total = d.donation_total\n\n thank_you = '''\\n\\nDear {}\n\n Thank you for your most recent generous donation of ${:,.2f}. You're support of ${:,.2f}\n over the years has helped us fund many great programs! We wanted to write you to thank you and that we \n look forward to your continued support!\n\n Sincerely,\n\n The ChickTech Donations Department'''.format(donor.fullname, donor.last_donation, donation_total)\n return thank_you",
"def print_donor_totals_report():\n # # Creating list to hold donors info for printing\n update_lists()\n try:\n print()\n title = ['Donor Name', '| Total Given ', '| Num Gifts',\n ' | Average Gift']\n print('{:<20}{:>14}{:^14}{:>14}'.format(title[0], title[1],\n title[2], title[3]))\n print('-'*65)\n print()\n for donor in donor_totals_list:\n average_gift = float(donor.donation_total) / donor.num_donations\n print('{:<22}{}{:>12.2f}{:>10}{:>8}{:>12.2f}'.format(donor.fullname, '$', donor.donation_total,\n donor.num_donations, '$', average_gift))\n print()\n\n except Exception as e:\n logger.info(f'Error printing donor list at {donor.fullname}')\n logger.info(e)",
"def print_all_donor_donations():\n print(\"\\nList of Donors and Donations\")\n print(\"\\nDonor Name - Donation Date - Donation Amount:\")\n print(\"-\"*40)\n for donation in donor_donations_list:\n print(f'{donation.fullname} - {donation.donation_date} - ${donation.donation_amount:,.2f}')\n print()",
"def print_single_donor_donations():\n update_lists()\n donor_name = get_name_input()\n single_donor_print(donor_name)\n return donor_name",
"def create_report(self):\n # Base setup\n line_out = ''\n line_out += \"{:<15} | {:^15} | {:^30}\\n\".format(\"Name\", \"Donations\", \"Email\")\n line_out += (\"-\"*65)\n print(line_out)\n\n # Setup line format to recieve ordered donor info \n for name in self.all_donors:\n line = \"{:<15} | {:^15} | {:^30}\".format(name, self.r.hget(name, 'donations'), self.r.hget(name, 'email'))\n print(line)",
"def __str__(self):\n email_template = '\\n'.join((f'\\n\\nDear {self._full_name},\\n',\n f'Thank you for your very kind donation of ${self.last_donation:.2f}.\\n',\n 'It will be put to very good use.\\n',\n ' Sincerely,',\n ' -The Team\\n'))\n return email_template",
"def print_letter(donor):\n message = \"Dearest, {}. Thank you so much for your generosity with your most recent donation of ${}. \\nSincerely.\"\n print(message.format(donor[0], donor[1][-1]))",
"def createThankYouEmail(self):\n result = (\"\\nDear {:s},\\n\\n\"\n \"\\tThank you so much for your generous donation of ${:,.2f}!\\n\\n\"\n \"\\tIt will be put to very good use.\\n\\n\"\n \"\\t\\tSincerely,\\n\\t\\t\\t- The Team\".format(self.name, self.getTotDonation())\n )\n return result",
"def donor_report():\n \"\"\"print(\"{:<15}{:5}{:5}{}\".format(\"Donor Name\", \"| Total Given\", \"| Num Gifts\", \"| Average Gift\"))\n print(\"{:-<70}\".format(\"\"))\n \n for i in range(len(donors)):\n print(\"{:25s} ${:11.2f} {:9s} ${:12.2f}\".format((donors[i][0]), sum(donors[i][1]), len(donors[i][1]),\n sum(donors[i][1]) // len(donors[i][1])))\"\"\"",
"def thank_you_message(self, name, donation_amount):\n\t\tthank_you_message = \"\\nThank you {0:s} for you generous donation of ${1:.2f}.\\n\".format(name, round(donation_amount,2))\n\t\treturn thank_you_message",
"def list_donations(self, caller):\n msg = \"{wDonations:{n\\n\"\n table = PrettyTable([\"{wGroup{n\", \"{wTotal{n\"])\n for donation in self.donations:\n table.add_row([str(donation.receiver), donation.amount])\n msg += str(table)\n caller.msg(msg)",
"def donor_letter_1(donor, donation):\n return ('''\n Dear {}\n\n Thank you for your very kind donation of ${:.2f}.\n It will be put to very good use helping the youth\n of your nation.\n\n Sincerely,\n The Youth Council\n ''').format(donor[0], donation)",
"def get_thank_you(self, donor):\r\n donor_dict = {'name': donor.name, 'donation': donor.donations[-1],\r\n 'num_donations': len(donor.donations)}\r\n donor_dict['multiple'] = 's' if len(donor.donations) > 1 else ''\r\n\r\n thankyou = ('Dear {name}:\\n'\r\n 'Thank you for your generous donation of '\r\n '${donation:.2f}.\\nI really appreciate your '\r\n '{num_donations}\\ndonation{multiple} to our '\r\n 'organization.\\nI assure you that your contributions '\r\n 'will be put to\\ngood use!\\n\\n'\r\n 'Regards,\\nBen').format(**donor_dict)\r\n return thankyou",
"def printthankyou(donorname):\n print(THANK_YOU_LETTER.format(name=donorname, amount=donor_db[donorname][-1]))",
"def create_report():\n print(\"Donor: | $ Total | Donations | $ Average |\")\n print(\"-\"*76)\n for item in donors:\n amt_total = float(sum(item[1]))\n num_total = int(len(item[1]))\n # Thousand separator as default. Careful with the space if we get some big donors.\n print(\"{:<26}|${:>15,.2f}|{:>15}|{:>15,.2f}\".format(item[0], amt_total, num_total, amt_total/num_total))",
"def send_single_thank_you():\n update_lists()\n donor_name = get_name_input()\n\n if donor_name == \"quit\":\n print(\"No donor name entered, exiting to menu\")\n else:\n donor_amount = check_number_input()\n\n if donor_name not in donor_totals_list:\n firstname, lastname = donor_name.split(\" \")\n add_donor(firstname, lastname, donor_name)\n add_donation(donor_name, donor_amount)\n else:\n for donor in donor_totals_list:\n if donor.fullname == donor_name:\n add_donation(donor_name, donor_amount)\n print('\\nDear {},'.format(donor_name))\n print('''\\tThank you for your generous donation of ${:,.2f}\\n\n Sincerely, \\nThe ChickTech Donations Department\\n'''.format(\n donor_amount))\n update_lists()",
"def create_report():\n names, totals, num_gifts, avg_gift = get_donor_summary(donors)\n print(f\"Donor Name{'':<20} | Total Given{'':>0} | Num Gifts{'':>0} | Average Gift{'':>0}\")\n print(f\"-\" * 72)\n for name, total, num_gift, avg_gift in zip(names, totals, num_gifts, avg_gift):\n print(f\"{name:<32}${total:>11}{num_gift:>12} ${avg_gift:>13}\")\n return None",
"def print_donor_list():\n print('Below are the existing donors: ')\n for donor in donors_data:\n print('\\t- ', donor[\"name\"], ' ', donor[\"donations\"])",
"def print_letter(donor):\n msg = get_thankyou_message(donor)\n print(msg)",
"def get_thankyou_message(donor):\n message = '''Dear {}, \n Thank you so much for your generosity with your most recent donation of ${}. \n It will be put to very good use.\n Sincerely.'''\n return message.format(donor[\"name\"], donor[\"donations\"][-1])",
"def _print_donors():\n print('Current donors: ' + ', '.join(_get_donor_names()))",
"def print_donor_report(database):\n name_max = 30\n\n rpt_title = \"Donor Name\" + ' ' * (name_max - 9) + \"| Total Given | Num Gifts | Average Gift\"\n print(rpt_title)\n print(\"-\" * len(rpt_title))\n\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n\n query = (Donor\n .select(Donor.name,\n fn.COUNT(Donation.amount).alias('ccount'),\n fn.SUM(Donation.amount).alias('csum'),\n fn.AVG(Donation.amount).alias('cavg'))\n .join(Donation, JOIN.LEFT_OUTER)\n .group_by(Donor.name)\n )\n\n for d in query:\n print(f\"{d.name:{name_max}} $ {d.csum:>10.2f} {d.ccount:>9} ${d.cavg:>12.2f}\")\n\n database.close()",
"def print_report(donors_list):\n width = 68\n print(\"-\" * width)\n header = (\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\")\n print(\"{:20} | {:15} | {:10} | {:12}\".format(*header))\n print(\"-\" * width)\n for index, donor in enumerate(donors_list):\n name = donor[0]\n total = sum(donor[1])\n num_gift = len(donor[1])\n average = total/num_gift\n print(\"{:22} ${:12,.2f} {:12d} ${:12,.2f}\".format(name, total, num_gift, average ))\n print(\"-\" * width)",
"def send_thank_you(fullname):\n print(f'Thank you {fullname} for your generous donation!')"
]
| [
"0.7494052",
"0.74666184",
"0.70844775",
"0.70617104",
"0.69554836",
"0.68940264",
"0.6838346",
"0.6830946",
"0.6789356",
"0.66041046",
"0.65294826",
"0.6522132",
"0.6489182",
"0.646513",
"0.64368844",
"0.6427846",
"0.6380565",
"0.6371538",
"0.63009727",
"0.6285065",
"0.62678003",
"0.626625",
"0.6245501",
"0.6215317",
"0.61887896",
"0.6108844",
"0.61003965",
"0.6094168",
"0.6091171",
"0.6052458"
]
| 0.8131426 | 0 |
Return sorted list of donors. | def sort_donor_list(donor_list):
sorted_list = sorted(
donor_list, key=lambda donor: sum(donor_list[donor]), reverse=True)
return sorted_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def donor_names(self):\n return list(self.donors)",
"def list_donors(self):\n return [donor.name for donor in self.donors]",
"def all_donors(self):\n return [item for item in self.r.keys()]",
"def sort_donations(self):\n intermed_list = []\n for item in self.donations:\n intermed_list.append((item.item_class, item))\n intermed_list.sort()\n self.donations = []\n for item in intermed_list:\n self.donations.append(item[1])",
"def donor_names(self):\n return [donor.name for donor in self.donors]",
"def all_donors_all_donation(self):\n for name in self.all_donors:\n person = self.r.hgetall(name)\n print(f\"Person: {name}\")\n for key, value in person.items():\n print(f\"{key}: {value}\")",
"def generate_report_data(self):\n # Get list of donors and custom sort using magic method\n donors = list(self.donors.values())\n donors.sort(reverse=True)\n report = [(donor.name, donor.total_donations(), donor.num_donations(),\n donor.average_donation()) for donor in donors]\n return report",
"def print_donor_list():\n print('Below are the existing donors: ')\n for donor in donors_data:\n print('\\t- ', donor[\"name\"], ' ', donor[\"donations\"])",
"def donor_names():\n names = list()\n for name in donor_db:\n names = names + [name[0]]\n return names",
"def print_donor_list():\n print('Below are the existing donors: ')\n for donor in donors_list:\n print('\\t- ', donor[0], ' ', donor[1])",
"def print_donor_list():\n print(data_base.donor_names)",
"def get_list_of_donors():\n try:\n logger.info('opening get_list_of_donors database call')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n return Donors.select()\n\n except Exception as e:\n logger.info(e)\n\n finally:\n database.close()",
"def display_list(d):\n print(\"\\nOur generous donors: \\n\")\n for donor_name in iter(d.donors):\n print(donor_name)\n print(\"\\n\")",
"def print_donors_names():\n update_lists()\n print(\"\\nDonors\")\n print(\"-\"*20)\n for donor in donor_names_list:\n print(donor.fullname)\n print()",
"def clients_sorted_by_rentals(self):\n rentals = self.get_list()\n number_of_rented_movies = dict.fromkeys([rental.client for rental in rentals], 0)\n for rental in rentals:\n number_of_rented_movies[rental.client] += 1\n items = sorted(number_of_rented_movies.items(), key = lambda item: item[1], reverse=True)\n return [ClientDTO(item[0], item[1]) for item in items]",
"def print_all_donor_donations():\n print(\"\\nList of Donors and Donations\")\n print(\"\\nDonor Name - Donation Date - Donation Amount:\")\n print(\"-\"*40)\n for donation in donor_donations_list:\n print(f'{donation.fullname} - {donation.donation_date} - ${donation.donation_amount:,.2f}')\n print()",
"def get_list_of_donations():\n try:\n logger.info('opening get_list_of_donations database call')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n query_results = (Donations.select(Donations.id, Donations.donation_date,\n Donations.donation_amount, Donations.donated_by_id.alias('fullname')))\n return query_results\n except Exception as e:\n logger.info(f'Error getting list of donors')\n logger.info(e)\n\n finally:\n logger.info('closing get_list_of_donations database call')\n database.close()",
"def donor_names():\n return list(donor_db.keys())",
"def donations(self):\n return self.caller.player.Dominion.assets.donations.all().order_by(\"amount\")",
"def printdonorlist():\n for name in donor_db:\n print(name)",
"def _get_donor_names():\n return tuple(x[0] for x in _donors)",
"def diamonds(self):\n return sorted(tuple([v for v in self if v.suit == 'diamonds']), reverse=True)",
"def clients_sorted_by_name(self):\n rentals = self.get_list()\n rentals = sorted(rentals, key = lambda rental: rental.client.full_name)\n return [rental.client for rental in rentals]",
"def donor_names():\n return donor_db.keys()",
"def gen_donor():\n# <<<<<<< master\n return [donor for donor in donor_data]",
"def _print_donors():\n print('Current donors: ' + ', '.join(_get_donor_names()))",
"def sorted_carnivores(self):\n fitness_dict = {carn: carn.fitness for carn in self.carnivores}\n sorted_tuples = dict(sorted(fitness_dict.items(), key=lambda x: x[1], reverse=True))\n\n return list(sorted_tuples.keys())",
"def sortdb():\n return sorted(donor_db.items(), key=sumdbkey, reverse=True)",
"def movies_sorted_by_rentals(self):\n rentals = self.get_list()\n number_of_rentals = dict.fromkeys([rental.movie for rental in rentals], 0)\n for rental in rentals:\n number_of_rentals[rental.movie] += 1\n items = sorted(number_of_rentals.items(), key = lambda item: item[1], reverse=True)\n return [MovieDTO(item[0], item[1]) for item in items]",
"def test_data():\n return [Donor(\"David Andrews\", [200.50, 400.00, 250.75]),\n Donor(\"John Goodfellow\", [25.00, 175.50]),\n Donor(\"Mary Suzuki\", [75.00, 125.00, 250.00]),\n Donor(\"Bonney Lake\", [500.50, 700.75, 500.25]),\n Donor(\"DeMarcus Rollins\", [155.00, 165.00])\n ]"
]
| [
"0.71160483",
"0.7068133",
"0.7037995",
"0.701818",
"0.6718538",
"0.6578298",
"0.6562123",
"0.6495291",
"0.64889157",
"0.64533186",
"0.63100153",
"0.62594986",
"0.62133366",
"0.620444",
"0.61822283",
"0.6161308",
"0.61132234",
"0.60947376",
"0.60909027",
"0.6063817",
"0.6016411",
"0.6011586",
"0.60000974",
"0.59948766",
"0.59567356",
"0.58670586",
"0.58463895",
"0.57968104",
"0.57909244",
"0.5692218"
]
| 0.72371894 | 0 |
calculates the minimum number of bits a cardinality fits in; assumes the max is 32 bits | def number_bits_in_cardinality(self,card):
return 32 - self.count_lead_zs(card) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _bit_storing_size(n):\n return -((-n) // 8)",
"def _get_nr_of_bits(self):\n return sum(self._size_var)",
"def _num_32_bit_words_for_bit_fields(bit_fields):\n num_buckets, cur_bucket = 0, 0\n for field in bit_fields:\n if field.size + cur_bucket > 32:\n num_buckets += 1\n cur_bucket = 0\n cur_bucket += field.size\n return num_buckets + (cur_bucket > 0)",
"def _max_bits_used_in_function_in_round():\n word_size = 16\n bits_occupied = [word_size] * len(cost_functions)\n for (pa, pb) in all_possible_pairs:\n for i in range(len(cost_functions)):\n max_sum_of_cost = num_pairings_in_round * \\\n cost_functions[i](pa, pb)\n while (max_sum_of_cost >= 2**bits_occupied[i]):\n bits_occupied[i] *= 2\n bits_occupied = [2*b for b in bits_occupied] # Paranoia\n for b in bits_occupied:\n assert(b % word_size == 0)\n return max(bits_occupied)",
"def bit_smarter(limit):\n c_lengths = {}\n\n for s in range(1, limit+1):\n c_lengths[s] = s_collatz_length(s, c_lengths)\n\n return max(c_lengths, key=lambda x: c_lengths[x])",
"def _calculate_bit_size(self, history: sizing_executor.SizeAndDTypes) -> int:\n bit_size = 0\n for num_elements, dtype in history:\n bit_size += num_elements * self._bits_per_element(dtype)\n return bit_size",
"def bitSizeOf() -> int:\n\n return 32",
"def numbits(n):\n return int(math.ceil(math.log(n, 2)))",
"def getrandbits(k: int) -> int:\n ...",
"def get_min_run(n):\n r = 0\n while n >= 64:\n r |= n & 1\n n >>= 1\n return n + r",
"def cardinality(self):\n from sage.arith.all import binomial\n n = self._size\n if n == 0:\n return Integer(1)\n return (2 * binomial(4 * n + 1, n - 1)) // (n * (n + 1))\n # return Integer(2 * factorial(4*n+1)/(factorial(n+1)*factorial(3*n+2)))",
"def count(bits: int) -> int:\n return len(to_list(bits)) # I'm lazy",
"def solution(n: int) -> int:\n binary_gap = 0\n count = 0\n # skip the lowest zeros\n while n and (n & 1) == 0:\n n = n >> 1\n while n:\n while n & 1:\n n = n >> 1\n while n and (n & 1) == 0:\n count += 1\n n = n >> 1\n if n & 1 and binary_gap < count:\n binary_gap = count\n count = 0\n return binary_gap",
"def find_optimal_compress_dim(tensor_shape, bit):\r\n index = 0\r\n curr_index = 0\r\n max_dim = 0\r\n for dim in tensor_shape:\r\n if (dim * bit) % 8 == 0:\r\n index = curr_index\r\n break\r\n if dim > max_dim:\r\n index = curr_index\r\n max_dim = dim\r\n curr_index += 1\r\n return index",
"def calBitLen(n, p):\n m = int(-(n*math.log(p))/BloomFilter.ln2p2)\n # round up to 32 bits\n if m%32: m += (32-m%32)\n return m",
"def bit_length(self, ???):",
"def test_bit_count_random_bit_size(self):\n bit_size = random.randint(1, 40)\n ops = [bitwise_operations.bit_count(self.five_255_bin, 0, bit_size)]\n\n _, _, result = self.as_connection.operate(self.test_key, ops)\n assert result[\"255\"] == bit_size",
"def bitSizeOf() -> int:\n\n return 64",
"def test_bit_count_bit_size_too_large(self):\n ops = [bitwise_operations.bit_count(self.zero_one_bin, 1, 81)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)",
"def bitSizeOf() -> int:\n\n return 1",
"def number_of_bits(self) -> int:\n raise NotImplementedError('To be Overidden by the derived class')",
"def cardinality(self):\n return int(ifac(self.size))",
"def _bitsfor(maxval):\n maxvalbits = int(round(math.log(maxval) / math.log(2)))\n if maxval != (1 << maxvalbits):\n raise ValueError(\"maxval must be a power of 2, not %d\" % maxval)\n return maxvalbits",
"def __check_bit_size(self, value, num_bits):\n is_fit = False\n if value <= 2 ** num_bits - 1:\n is_fit = True\n return is_fit",
"def size_as_number_of_bits(size):\n\n if size == 0:\n return 0\n else:\n return len('{:b}'.format(size))",
"def minsize(self):# -> int:\r\n return 0",
"def cardinality(self):\n estimate = self._alpha * math.pow(self._m, 2) / sum(math.pow(2, -x) for x in self._registers)\n\n if estimate <= 2.5 * self._m:\n # get number of registers equal to zero\n empty_registers = self._registers.count(0)\n if empty_registers != 0:\n return self._linear_count(empty_registers)\n else:\n return estimate\n elif estimate <= ((1 << 32) / 30):\n return estimate\n else:\n return self._large_range_correction(estimate)",
"def python_int_bitwidth():\r\n # 'l' denotes a C long int, and the size is expressed in bytes.\r\n return struct.calcsize('l') * 8",
"def countBits(x):\n # from https://stackoverflow.com/questions/10874012/how-does-this-bit-manipulation-work-in-java/10874449#10874449\n # Used because of the O(log(n)) complexity\n\n x = x - ((x >> 1) & 0x55555555)\n x = (x & 0x33333333) + ((x >> 2) & 0x33333333)\n x = (x + (x >> 4)) & 0x0F0F0F0F\n x = x + (x >> 8)\n x = x + (x >> 16)\n return x & 0x0000003F",
"def find_minrun(n: int) -> int:\n r = 0 # Becomes 1 if any bits are shifted off\n assert n >= 0\n while n >= 64:\n # The target of this while-loop:\n # If n is an exact power of 2, return 32;\n # otherwise, return int k in [32,64] such that n/k is close to, but strictly \n # less than, an exact power of 2 that is larger than 2^1=2.\n \n # | is `OR by bits`, & is `AND by bits`. ie r = r|(n&1).\n # The next two lines of code work as follows:\n # 1. If n is an exact power of 2, then for all loops, n&1=0, r=r|0=0|0=0, \n # and n is halved, until n=64 and is halved to 32, with r=0, so returns 32.\n # 2. Otherwise, then there must be at least one `1` among the second to the \n # last digits of n's binary form, eg.10010000. We scan from the rightmost digit # to the left, and whenever a 1 is met, r is 1. n will decrease to the n//2^k \n # that is closest to but less than 64. The target is met.\n #\n # In essence, this procedure is simply taking the first 6 bits of n, and add \n # 1 if any of the remaining bits is 1 (we call a bit that is 1 a \"set bit\").\n\n r |= n & 1\n n >>= 1 # move n's binary form all 1 digit to the right, ie n = n // 2\n # If n < 64, just return n, since it is too small to bother with fancy stuff\n return n + r"
]
| [
"0.72264665",
"0.6784838",
"0.67456406",
"0.6685689",
"0.66846687",
"0.6517849",
"0.6465248",
"0.6445661",
"0.64269054",
"0.6421126",
"0.6415093",
"0.64109385",
"0.6407402",
"0.6380993",
"0.63640344",
"0.6360561",
"0.63488674",
"0.6340697",
"0.63147354",
"0.6313916",
"0.6311104",
"0.63036716",
"0.6297056",
"0.6290261",
"0.6278254",
"0.62617517",
"0.624818",
"0.6240107",
"0.62098134",
"0.6208415"
]
| 0.7505586 | 0 |
clone a source symbol to the current instance | def clone(self,source):
self.cardinality = source.cardinality
self.sax_character = source.sax_character
self.wildcardbits = source.wildcardbits | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy(self):\n cdef SymbolTable result = SymbolTable.__new__(SymbolTable)\n result.table = new sym.SymbolTable(self.table[0])\n return result",
"def copy_from(self, source):\n\t\tassert ltrace_func(TRACE_BASE)\n\t\tself.name = source.name",
"def clone_from(self, source):\n\n if type(source) is type(self):\n # special case using class internals\n self._clone_from_common_class(source)\n else:\n self._clone_from_general(source)",
"def clone(self):",
"def clone(self, clone=None):\r\n # copy specs from supplied object\r\n if clone is not None: [setattr(self, v, getattr(clone, v)) for v in vars(clone)]",
"def clone(self):\n return _libsbml.ReferenceGlyph_clone(self)",
"def clone(self):\n return self.__class__(self, self.spectrum, wallet=self.wallet)",
"def clone(self):\n return _libsbml.SpeciesReferenceGlyph_clone(self)",
"def copy(self):",
"def clone(self):\n return _libsbml.SBase_clone(self)",
"def clone(self):\n if self.result_id is not None:\n new_id = self.module.new_id()\n else:\n new_id = None\n return Instruction(self.module, self.op_name, new_id, self.type_id,\n self.operands[:])",
"def clone(self):\n return _libsbml.GeneralGlyph_clone(self)",
"def clone(self):\n raise NotImplementedError",
"def clone(self):\n return self.__class__(self.name, *self)",
"def clone(self):\n return _libsbml.SpeciesGlyph_clone(self)",
"def clone(self):\n return _libsbml.Curve_clone(self)",
"def copy(self):\n kopy = self.__class__()\n # Copy the source net\n kopy.source_net = nx.DiGraph(self.source_net)\n return kopy",
"def copy(self): # real signature unknown; restored from __doc__\n pass",
"def copy(self): # real signature unknown; restored from __doc__\n pass",
"def copy(self): # real signature unknown; restored from __doc__\n pass",
"def clone(self):\n return _libsbml.Port_clone(self)",
"def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())",
"def copy (self):\n return self.__class__(self.name, self[:])",
"def copy(self) -> \"Z2Symmetries\":\n return deepcopy(self)",
"def copy(self):\n\t\tassert ltrace_func(TRACE_BASE)\n\t\treturn NamedObject.copy(self)",
"def clone(self):\n return _libsbml.Input_clone(self)",
"def new(name, source):",
"def copy(self):\n import copy as python_copy\n my_ssa = python_copy.copy(self) # fast shallow copy (avoid __init__)\n my_ssa.input_vars = my_ssa.input_vars[:]\n my_ssa.output_vars = my_ssa.output_vars[:]\n my_ssa.assignments = my_ssa.assignments.copy()\n return my_ssa",
"def clone(self):\n return _libsbml.ModifierSpeciesReference_clone(self)",
"def clone(self):\n return _libsbml.SBaseExtensionPoint_clone(self)"
]
| [
"0.6922512",
"0.69074273",
"0.6616771",
"0.6461163",
"0.63648504",
"0.6343204",
"0.6225818",
"0.6215719",
"0.6205845",
"0.6142562",
"0.613019",
"0.6129232",
"0.6114464",
"0.60964054",
"0.6074465",
"0.60679007",
"0.6059474",
"0.6058359",
"0.6058359",
"0.6058359",
"0.60366386",
"0.6034494",
"0.6015523",
"0.6009529",
"0.60081244",
"0.59862596",
"0.5972196",
"0.5961777",
"0.5946751",
"0.5946483"
]
| 0.7011077 | 0 |
Initialize a celeste source object from a celeste_df row | def init_celeste_source_from_df(self, celestedf_row, is_star=None):
if is_star in [True, False]:
celestedf_row.is_star = is_star
params = du.celestedf_row_to_params(celestedf_row)
src = self._source_type(params, model=self, imgs=self.images)
# add on some more info for tracking
src.objid = celestedf_row.objid
src.run = celestedf_row.run
src.camcol = celestedf_row.camcol
src.field = celestedf_row.field
return src | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, df):\n self.df = df",
"def __init__(self, df):\n self.original_data = df\n self.preprocessed_data = pd.DataFrame()",
"def __init__(self, df):\n self.data = df",
"def from_dataframe(cls, df, data_cls):\n pass",
"def from_dataframe(cls, dataframe):\n return cls(dataframe)",
"def __init__(self, df):\n self.original_data = df\n self.cleaned_data = pd.DataFrame()",
"def from_pandas(cls, df, data_cls):\n pass",
"def __init__(self, df_input=None):\n\n if df_input is None:\n pass\n else:\n try:\n self.df_input = pd.read_csv(df_input)\n except Exception as e:\n print(e)",
"def __init__(self, df):\n self.df = df\n self._set_hash()",
"def __init__(self, columns, values=[], row_number=None, source_row_number=None):\n self.columns = columns\n self.values = copy.copy(values)\n self.row_number = row_number\n self.source_row_number = source_row_number",
"def __init__(self, df):\n self.df = df\n self.min_interval_in_seconds = 99999999999",
"def __init__(self, row): # pylint: disable=super-init-not-called\n self._row = row\n self.entity_id = self._row.entity_id\n self.state = self._row.state\n self._attributes = None\n self._last_changed = None\n self._last_updated = None\n self._context = None",
"def __init__(self, df, df_test, n_days, length, style) :\n self.n_days = n_days\n self.length = length\n self.df = df\n self.df_test = df_test\n self.features = len(df.columns) - 1\n self.style = style\n self.df_true = df_test.copy()",
"def __init__(self, df, df_test, n_days, length, style) :\n self.n_days = n_days\n self.length = length\n self.df = df\n self.df_test = df_test\n self.features = len(df.columns) - 1\n self.style = style\n self.df_true = df_test.copy()",
"def __init__(\n self,\n dataframe: pd.DataFrame,\n cat_feats_cfg: dict,\n train: bool,\n ):\n # extract dataframe and the config values.\n self.cat_feats_cfg = cat_feats_cfg\n self.dataframe = dataframe\n self.train = train\n self.handle_na = cat_feats_cfg['handle_na']\n self.enc_types = cat_feats_cfg['enc_types']\n self.num_best = cat_feats_cfg['num_best']\n self.cat_feats = cat_feats_cfg['cols']\n self.target_cols = cat_feats_cfg['target_cols']\n self.output_path = cat_feats_cfg['output_path']\n # create empty dict's for storing encoders for features.\n if self.train:\n self.label_encoders = dict()\n self.binary_encoders = dict()\n self.ohe = None\n else:\n self.encoders = joblib.load(self.cat_feats_cfg['encoder_path'])\n # hanlde NAN values if true.\n if self.handle_na:\n for feat in self.cat_feats:\n self.dataframe.loc[:,feat] = self.dataframe.loc[:,feat].astype(str).fillna(\"-9999999\")\n self.dataframe_d_copy = self.dataframe.copy(deep=True)",
"def make_dataset(self, df, **kwargs):\n\t\treturn df",
"def _import_source_data(self, source_file: str) -> None:\n with open(source_file, 'r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n self.cell_map.append(\n Cell(\n datamap_id=None,\n cell_key=row['cell_key'],\n cell_value=None, # have no need of a value in dm\n cell_reference=row['cell_reference'],\n template_sheet=row['template_sheet'],\n bg_colour=row['bg_colour'],\n fg_colour=row['fg_colour'],\n number_format=row['number_format'],\n verification_list=None))",
"def __init__(self, df, cat_features, enc_type, handle_na=False ):\n self.df = df\n self.cat_features = cat_features\n self.enc_type = enc_type\n self.label_encoder = dict()\n self.binary_encoder = dict()\n self.ohe = None\n self.handle_na = handle_na\n \n if self.handle_na:\n for cat in self.cat_features:\n self.df.loc[:,cat] = self.df.loc[:,cat].astype('str').fillna('-9999999')\n self.output_df = self.df.copy(deep=True)",
"def from_pandas_edgelist(\n self,\n pdf,\n source=\"source\",\n destination=\"destination\",\n edge_attr=None,\n weight=None,\n edge_id=None,\n edge_type=None,\n renumber=True,\n ):\n if not isinstance(pdf, pd.core.frame.DataFrame):\n raise TypeError(\"pdf input is not a Pandas DataFrame\")\n\n gdf = cudf.DataFrame.from_pandas(pdf)\n self.from_cudf_edgelist(\n gdf,\n source=source,\n destination=destination,\n edge_attr=edge_attr,\n weight=weight,\n edge_id=edge_id,\n edge_type=edge_type,\n renumber=renumber,\n )",
"def _dataframe_from_feather(fn, **kwargs):\n\treturn pd.read_feather(fn, **kwargs)",
"def __init__(\n self,\n row: Row | EventAsRow,\n event_data_cache: dict[str, dict[str, Any]],\n ) -> None:\n self.row = row\n self._event_data: dict[str, Any] | None = None\n self._event_data_cache = event_data_cache\n self.event_type: str | None = self.row.event_type\n self.entity_id: str | None = self.row.entity_id\n self.state = self.row.state\n self.context_id_bin: bytes | None = self.row.context_id_bin\n self.context_user_id_bin: bytes | None = self.row.context_user_id_bin\n self.context_parent_id_bin: bytes | None = self.row.context_parent_id_bin\n # We need to explicitly check for the row is EventAsRow as the unhappy path\n # to fetch row.data for Row is very expensive\n if type(row) is EventAsRow: # noqa: E721\n # If its an EventAsRow we can avoid the whole\n # json decode process as we already have the data\n self.data = row.data\n return\n source = cast(str, self.row.event_data)\n if not source:\n self.data = {}\n elif event_data := self._event_data_cache.get(source):\n self.data = event_data\n else:\n self.data = self._event_data_cache[source] = cast(\n dict[str, Any], json_loads(source)\n )",
"def __init__(self, traindf, params):\n default_args = {\n \"seqin\": 0,\n \"smode\": \"positional\", #site mode\n \"direction\": \"inout\",\n \"positive_cores\" : [],\n \"poscols\": [],\n \"namecol\":\"Name\",\n \"seqcol\":\"Sequence\",\n }\n self.df = traindf\n self.set_attrs(params, default_args)\n if self.smode != \"relative\" and self.smode != \"positional\":\n raise TypeError(\"Smode can only be 'relative' or 'positional'\")\n if self.direction != \"inout\" and self.direction != \"orientation\":\n raise TypeError(\"Direction can only be 'inout' or 'orientation'\")\n if self.direction == \"orientation\" and (\"positive_cores\" not in params or not params[\"positive_cores\"]):\n raise TypeError(\"Positive cores are needed when direction is 'orientation'\")\n\n if self.namecol in self.df:\n fastadict = dict(zip(self.df[self.namecol], self.df[self.seqcol]))\n shapeobj = ds.DNAShape(fastadict)\n else:\n shapeobj = ds.DNAShape(self.df[self.seqcol].tolist())\n self.shapes = {k:getattr(shapeobj,k.lower()) for k in shapeobj.shapetypes}\n # make a dictionary of list instead of nested dictionary since we use this\n # as features\n if self.namecol in self.df:\n namelist = self.df[self.namecol].tolist()\n else:\n namelist = next(iter(self.shapes.values())).keys()\n self.shapes = {k:[v[str(n)] for n in namelist] for k, v in self.shapes.items()}\n if self.direction == \"orientation\":\n ori = Orientation(self.df, {\"positive_cores\":self.positive_cores}).get_feature()\n self.df[\"orientation\"] = [o[\"ori\"] for o in ori]",
"def __init__(self, *args, **kwargs):\n AssembledFeatureReader.__init__(self, *args, **kwargs)\n self.extra_columns = kwargs.get(\"extra_columns\", 0)",
"def __init__(self, data_frame, mins_set):\n # super(FeaturePrevDelays, self).__init__()\n self.df = data_frame.copy()\n self.mins_set = mins_set",
"def from_arrow(cls, dataframe_location):\n if isinstance(dataframe_location, str):\n df = cudf.DataFrame.from_arrow(read_arrow(dataframe_location))\n else:\n df = cudf.DataFrame.from_arrow(dataframe_location)\n return cls(df)",
"def set_new_df(self, new_df):\n\n try:\n if(isinstance(new_df, pd.DataFrame)):\n self.df_input = new_df\n if(isinstance(new_df, str)):\n self.df_input = pd.read_csv(new_df)\n except Exception as e:\n print(e)",
"def from_sdf(self, **kwargs):\n return self.__from_file(kwargs, _sdf)",
"def __init__(self, source_file, template):\n self.source_file = source_file\n self.serializer = template.serializer\n self.headers = template.fields\n # sometimes empty cells are treated as cells with values. So we need to trim them.\n self.slice_index = len(self.headers)\n\n self.rows_imported = 0\n self.rows_skipped = 0\n\n self.objects = []\n self.is_parsed = False\n self.errors = {}\n\n self._is_valid = None\n self._iterator = None",
"def __init__(self, data):\n self.data = data\n self.columns = Columns(data)\n self.rows = Rows(data)",
"def read_dataframe(cls, df, model_type=\"global\"):\n sites = df['sites']\n values = df['values']\n order = max([len(site) for site in sites])\n self = cls(sites, order=order, values=values, model_type=model_type)\n return self"
]
| [
"0.6318052",
"0.6287097",
"0.6159056",
"0.6123915",
"0.5904711",
"0.5873366",
"0.5849374",
"0.57739514",
"0.57281756",
"0.56613076",
"0.5628908",
"0.5582114",
"0.558189",
"0.558189",
"0.5538221",
"0.5534507",
"0.55192184",
"0.5515733",
"0.55035",
"0.55004627",
"0.54787076",
"0.54780525",
"0.5466561",
"0.5462554",
"0.5456952",
"0.5454529",
"0.54534745",
"0.54328173",
"0.5429283",
"0.5407284"
]
| 0.80212986 | 0 |
add a field (run/camcol/field) image information to the model img_dict = fits image keyed by 'ugriz' band init_srcs = sources (tractor or celeste) initialized in this field | def add_field(self, img_dict):
for k in img_dict.keys():
assert k in self.bands, "Celeste model doesn't support band %s"%k
self.field_list.append(Field(img_dict)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_field(self, field_data):\n def_field = {'id':None,\n 'ref':None,\n 'posx':'0',\n 'posy':'0',\n 'size':'50',\n 'text_orientation':'H',\n 'visible':'V',\n 'text_align':'L',\n 'props':'CNN'\n }\n\n field = dict(list(def_field.items()) + list(field_data.items()))\n #field['id'] = str(len(self.fields))\n\n self.fields.append(field)\n return field",
"def __init__(self):\n self.index = 'r11_07_06c'\n self.parameters = {'run_index': 'r11_07_06c',\n 'h_1': 0.25,\n 'rho_0': 1.150,\n 'rho_1': 1.100,\n 'rho_2': 1.000,\n 'alpha': 0.5,\n 'D': 0.4,\n 'H': 0.25,\n 'sample': 1.0,\n 'perspective': 'old'}\n self.run_data = {'run_index': 'r11_07_06c',\n 'l0x': 2796,\n 'l0y': 1151,\n 'lsx': 2793,\n 'lsy': 716,\n 'j10x': 210,\n 'j10y': 1165,\n 'j1sx': 208,\n 'j1sy': 727,\n 'leakage': -76,\n 'odd_1': 'n',\n 'j20x': 2728,\n 'j20y': 1086,\n 'j2sx': 2730,\n 'j2sy': 670,\n 'r0x': 1097,\n 'r0y': 1095,\n 'rsx': 1093,\n 'rsy': 683,\n 'odd_2': 'n'}\n self.raw_image = 'tests/data/synced/r11_07_06c/cam1/img_0001.jpg'\n self.bc_image = 'tests/data/bc/r11_07_06c/cam1/img_0001.jpg'\n self.processed_path = 'tests/data/processed_ref/r11_07_06c/cam1/img_0001.jpg'",
"def show_field(self, vehicles, type):\n\n # starting pixels x = 0, y = 0 on field image\n start_x = 78\n start_y = 45\n\n # block pixel width is slightly different per field size\n if self.size == 6:\n block_width = 72\n elif self.size == 9:\n block_width = 69\n elif self.size == 12:\n block_width = 68.5\n\n field = plt.imread(f\"data/RushHourImages/RushHour{self.size}.jpg\")\n fig, ax = plt.subplots()\n plt.imshow(field)\n plt.axis('off')\n\n for vehicle in vehicles:\n if vehicle.orientation == 'H':\n x = start_x + (vehicle.x * block_width)\n y = start_y + (vehicle.y * block_width)\n if vehicle.length == 2:\n car = plt.imread(f\"data/RushHourImages/Car{vehicle.id}.png\")\n else:\n car = plt.imread(f\"data/RushHourImages/Truck{vehicle.id}.png\")\n\n # truck: the image coordinate is his middle, which changes with the length of the car\n x += 40\n\n if vehicle.orientation == 'V':\n x = start_y + (vehicle.x * block_width)\n y = start_x + (vehicle.y * block_width)\n if vehicle.length == 2:\n car = plt.imread(f\"data/RushHourImages/Car-rotated{vehicle.id}.png\")\n else:\n car = plt.imread(f\"data/RushHourImages/Truck-rotated{vehicle.id}.png\")\n y += 40\n\n if self.size == 6:\n imagebox = OffsetImage(car, zoom=0.6)\n elif self.size == 9:\n imagebox = OffsetImage(car, zoom=0.4)\n elif self.size == 12:\n imagebox = OffsetImage(car, zoom=0.3)\n\n imagebox.image.axes = ax\n xy = (x, y)\n ab = AnnotationBbox(imagebox, xy, frameon=False)\n ax.add_artist(ab)\n\n if type == True:\n plt.show(block=False)\n plt.pause(0.001)\n plt.close()\n else:\n plt.show()",
"def MakeFieldmaps(self):\n if self.verbose:\n print 'Compute fieldmaps.'\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n if self.info[entry]['imgfile'] == None:\n# Fieldmap data not found.\n return\n# Make a magnitude image for use in checking registration.\n cmd = 'convert_file -f0 -m0 %s %s nii' % \\\n (entry, self.info[entry]['magfile'])\n self.CheckExec(cmd, [self.info[entry]['magfile'] + '.nii'])\n\n# Make fieldmap. Use separate loop in case make_fmap aborts.\n for entry in self.info:\n if self.info[entry]['type'] == 'fmap':\n fmapname = self.info[entry]['imgfile']\n if not os.path.exists('%s.nii' % fmapname) or self.redo:\n# Couldn't find or existing fmap, compute a new one.\n if self.verbose:\n extra_args = '-v'\n else:\n extra_args = ''\n if self.info[entry]['correct_fmap_phase'] == 'force':\n extra_args += ' --force-slicecorr'\n elif self.info[entry]['correct_fmap_phase'] == 'omit':\n extra_args += ' --omit-slicecorr'\n cmd = 'make_fmap %s %s %s' % (extra_args, entry, fmapname)\n# error = self.ExecCmd(cmd, halt_on_error=False)\n if self.no_fmapcorr:\n halt_on_error = False\n else:\n halt_on_error = True\n error = self.CheckExec(cmd, ['%s.nii' % fmapname], \\\n halt_on_error=halt_on_error)\n if error:\n self.info[entry]['valid'] = False\n del self.fmaps[entry]",
"def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )",
"def __init__(self, MRIObj, pRFModelObj = None, FAModelObj = None,\n pRF_data = [], FA_data = [],\n prf_dm = [], max_ecc_ext = 5.5,\n pysub = 'hcp_999999', flatmap_height = 2048, full_figsize = (12, 8)):\n\n # set data object to use later on\n self.MRIObj = MRIObj\n\n # Load pRF and model object\n self.pRFModelObj = pRFModelObj\n self.FAModelObj = FAModelObj\n\n ## data to be plotted \n self.pRF_data = pRF_data\n self.FA_data = FA_data\n\n ## figure settings\n self.flatmap_height = flatmap_height\n self.full_figsize = full_figsize\n self.images = {}\n \n ## create pycortex vars\n self.mask, extents = cortex.quickflat.utils.get_flatmask(pysub, height = self.flatmap_height)\n self.vc = cortex.quickflat.utils._make_vertex_cache(pysub, height = self.flatmap_height)\n\n self.mask_index = np.zeros(self.mask.shape)\n self.mask_index[self.mask] = np.arange(self.mask.sum())\n\n # set prf dm\n self.prf_dm = prf_dm\n\n ## set grid of possible points in downsampled space\n self.point_grid_2D = np.array(np.meshgrid(np.linspace(-1, 1, prf_dm.shape[0]) * max_ecc_ext,\n np.linspace(1, -1, prf_dm.shape[0]) * max_ecc_ext))",
"def buildRunDictMain(self, ori_images):\n self.run_dict[\"Of\"] = {\n \"Run\": not self.of_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running optical flow\",\n }\n self.run_dict[\"Back_Of\"] = {\n \"Run\": not self.back_of_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running back optical flow\",\n }\n self.run_dict[\"Depth\"] = {\n \"Run\": not self.depth_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running depth estimation\",\n }\n self.run_dict[\"Speed\"] = {\n \"Run\": True,\n \"Progress\": ori_images,\n \"Text\": \"Running speed estimation\",\n }\n self.run_dict[\"Optimization\"] = {\n \"Run\": self.ui.c_optimize.isChecked(),\n \"Progress\": ori_images * 9,\n \"Text\": \"Running parameter optimization\",\n }\n\n self.run_dict[\"Of_Vid\"] = {\n \"Run\": self.ui.c_of.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating optical flow video\",\n }\n self.run_dict[\"Back_Of_Vid\"] = {\n \"Run\": self.ui.c_back_of.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating backward optical flow video\",\n }\n self.run_dict[\"Depth_Vid\"] = {\n \"Run\": self.ui.c_depth.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating depth estimation video\",\n }\n\n self.run_dict[\"Speed_Plot\"] = {\n \"Run\": self.ui.c_speed_plot.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating plot for speed values\",\n }\n self.run_dict[\"Crash_Plot\"] = {\n \"Run\": self.ui.c_crash_plot.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating plot for time to crash\",\n }\n self.run_dict[\"Error_Plot\"] = {\n \"Run\": self.ui.c_error_plot.isChecked() and self.gt_exist,\n \"Progress\": ori_images,\n \"Text\": \"Creating plot for speed error\",\n }\n\n self.run_dict[\"Speed_Plot_Video\"] = {\n \"Run\": self.ui.c_speed_plot_video.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating speed plot video\",\n }\n self.run_dict[\"Error_Plot_Video\"] = {\n \"Run\": self.ui.c_error_plot_video.isChecked() and self.gt_exist,\n \"Progress\": ori_images,\n \"Text\": \"Creating error plot video\",\n }\n self.run_dict[\"Crash_Plot_Video\"] = {\n \"Run\": self.ui.c_crash_plot_video.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating time to crash plot video\",\n }\n\n self.run_dict[\"Super_Pixel_Video\"] = {\n \"Run\": self.ui.combo_superpixel.currentIndex() != 0\n and self.ui.c_super_pixel_video.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating super pixel video\",\n }\n self.run_dict[\"Super_Pixel_Label\"] = {\n \"Run\": self.create_super_pixel_label,\n \"Progress\": ori_images,\n \"Text\": \"Creating {0} superpixel labels\".format(self.super_pixel_method),\n }\n\n self.run_dict[\"Object_Detection\"] = {\n \"Run\": (\n self.ui.c_object_detection.isChecked()\n or self.ui.c_crash_plot.isChecked()\n )\n and not self.object_detection_dir_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running Object Detection\",\n }\n\n self.addAllProgressBar()\n self.buildParamsDict()\n self.saveUser()\n self.startCalcThread()",
"def add_files(self, file_dict):\n from xeye_calib import resize_rgb_b64\n if self.src_keys is None:\n self.src_keys, self.rgb_cam_list, self.rgb_of_depth_cam_list = init_cam_set(file_dict)\n self.src_keys_dict = {v: i for i, v in enumerate(self.src_keys)}\n logger.info('Init Calibrator done.')\n logger.info('src_keys_dict, {}'.format(self.src_keys_dict))\n logger.info('file_dict.keys, {}'.format(file_dict.keys()))\n for k, v in file_dict.items():\n filename = str(10000000 + self.counter)[1:]\n if k.startswith('cam'):\n if 'dept' in k:\n continue\n print(self.src_keys_dict.keys())\n cam_id = self.src_keys_dict[k]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.png')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n # print('calib data copy', v, dst_path)\n # print('calib data copy', v, dst_path, file=sys.stderr)\n # with open(self.record_path, 'a') as fout:\n # fout.write('cp ' + v + ' ' + dst_path + '\\n')\n with open(dst_path, 'wb') as fout:\n fout.write(base64.b64decode(v))\n elif k.startswith('rgb'):\n cam_id = self.src_keys_dict[k]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.jpg')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n if self.resize_xeye:\n resize_rgb_b64(v, dst_path)\n else:\n with open(dst_path, 'wb') as fout:\n fout.write(base64.b64decode(v))\n\n else:\n logger.warn('Unrocognize key: {}'.format(k))\n return\n self.counter += 1",
"def writeFitsImage( rs, cpuIndex, grid, projection):\n\n# print(\"Image: \", imageData)\n \n imageData = grid.image\n size = imageData.shape\n imageCopy = copy.deepcopy( imageData)\n nx = size[1]\n ny = size[0]\n\n # now flip the Y axis of the image to match the FITS Convention\n iy = ny - 1\n for iii in range(ny):\n imageCopy[iii][:] = imageData[iy][:]\n iy = iy - 1\n\n pixcrd = np.array([[0, 0], [24, 38]], dtype=np.float64)\n\n # Create a new WCS object. The number of axes must be set\n # from the start\n w = wcs.WCS(naxis=2)\n\n gridtype = grid.gridtype.upper()\n print(\"Grid Type: %s %d\" % (gridtype, gridtype.find('RA')))\n# gridtype = \"RA\"\n if gridtype.find('RA') > -1:\n maptype = 'RA'\n XTYPE = 'RA--'\n YTYPE = 'DEC-'\n else:\n maptype = 'GAL'\n XTYPE = 'GLON'\n YTYPE = 'GLAT'\n xstart = 360.\n ystart = 90.\n\n# select the projection here:\n# projection = \"-CYP\"\n# projection = \"-CAR\"\n\n crval1 = grid.crval1\n crval2 = grid.crval2\n crpix1 = grid.crpix1\n crpix2 = grid.crpix2\n cdelt1 = grid.cdelt1\n cdelt2 = grid.cdelt2\n print('--------- Grid Type: %s (%f,%f %f,%f ' % (gridtype, crval1, crval2, cdelt1, cdelt2))\n\n hdu = fits.PrimaryHDU()\n header = hdu.header\n\n dateobs = \"%s\" % (rs.utc)\n dateobs = dateobs.replace(\" \",\"T\")\n mydate = datetime.datetime.now()\n mydate = \"%s\" % (mydate)\n mydate = mydate[2:10]\n mydate.replace('-','/')\n\n header['NAXIS1'] = int(nx)\n header['NAXIS2'] = int(ny)\n header['BUNIT'] = 'K-km/s/BEAM'\n maptype = \"RA\"\n if maptype[0:2] == \"RA\":\n maptype = \"RA\"\n header['CTYPE1'] = 'RA---CAR'\n else:\n maptype = \"GAL\"\n header['CTYPE1'] = 'GLON-CAR'\n\n # create a cartesian x centered iamge \n header['CRPIX1'] = nx/2.\n header['CRVAL1'] = 180.\n grid.crval1 = header['CRVAL1']\n header['CDELT1'] = cdelt1\n header['CUNIT1'] = 'deg'\n header['CRVAL2'] = (grid.ymax+grid.ymin)/2.\n grid.crval2 = header['CRVAL2']\n header['CRPIX2'] = ny/2.\n header['CDELT2'] = cdelt2\n header['CUNIT2'] = 'deg'\n\n grid.gridtype = maptype\n if maptype[0:2] == \"RA\":\n print(\"RA: writeFits: %s\" % (maptype))\n header['CTYPE2'] = 'DEC--CAR'\n else:\n print(\"GAL: writeFits: %s\" % (maptype))\n header['CTYPE2'] = 'GLAT-CAR'\n\n header['WCAXES'] = 2\n header['RADESYS'] ='FK5'\n\n# temporarily replace ref coordinate iwth zero\n crval2 = header['CRVAL2']\n crpix2 = header['CRPIX2']\n# redefine the reference for the best cartisian format \n referencevalue = 0.\n dpix = (referencevalue - crval2)/cdelt2\n crpix2 = crpix2 + dpix\n# change x axis\n header['CRVAL2'] = referencevalue\n header['CRPIX2'] = crpix2\n\n header['EQUINOX'] = 2.000000000000E+03 # Equinox of equatorial coordinates\n header['BMAJ'] = 18.1 # Beam major axis in degrees: 80cm horn at 21.1cm\n header['BMIN'] = 18.1 # Beam minor axis in degrees\n header['BPA'] = 0.000000000000E+00 # Beam position angle in degrees\n header['RESTFRQ'] = 1.42040575177E+09 # Line rest frequency, Hz\n header['RESTWAV'] = 0.211061140551 # Line wavelength (m)\n header['DATE-OBS'] = dateobs\n header['DATE'] = mydate\n header['OBSERVER'] = 'Science Aficionado'\n header['OBJECT'] = 'Milky Way'\n header['TELESCOP'] = 'Aficionado Horn'\n header['HISTORY'] = \"GridSave.py -- Glen Langston -- 20 May 13\"\n header['HISTORY'] = \"Observations in March + April 2020\"\n\n# while len(header) < (36 * 4 - 1):\n# header.append() # Adds a blank card to the end\n# header.delval(\"EXTEND\")\n header.update()\n\n# hdu = fits.PrimaryHDU(header=header, data=imageData)\n hdu = fits.PrimaryHDU(header=header, data=imageCopy)\n\n # As file at 
filePath is deleted now, so we should check if file exists or not not before deleting them\n outname = (\"Aficionado_T%d\" % (cpuIndex)) + \"-\" + maptype + projection + \".fit\"\n if os.path.exists(outname):\n os.remove(outname)\n hdu.writeto(outname)\n\n# create a second file with new projection\n fixImageCoordinates( outname, projection)\n\n return",
"def _prep_roidb_entry(self, entry):\n im_path = os.path.join(self.root, 'images', self.img_dir_name, entry['file_name'])\n assert os.path.exists(im_path), 'Image \\'{}\\' not found'.format(im_path)\n entry['image'] = im_path\n entry['flipped'] = False\n # empty placeholders\n entry['bboxes'] = np.empty((0, 4), dtype=np.float32)\n entry['gt_classes'] = np.empty((0), dtype=np.int32)\n # remove unwanted fields that come from the json file\n for k in ['date_captured', 'coco_url', 'license', 'url', 'file_name', 'flickr_url']:\n if k in entry:\n del entry[k]",
"def build_filler_images(self):",
"def load_field(self,filename,unmask=True,timeslice=None,fieldname=None,\n check_for_grid_info=False,grid_info=None,grid_type='HD',\n **grid_kwargs):\n\n if not check_for_grid_info:\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n print(\"Reading input from {0}\".format(filename))\n with netCDF4.Dataset(filename,mode='r',format='NETCDF4') as dataset:\n if check_for_grid_info:\n latitudes = None\n longitudes = None\n for latitude_names in ['lat','y']:\n fields = dataset.get_variables_by_attributes(name=latitude_names)\n if len(fields) >= 1:\n break\n if len(fields) == 1:\n latitudes = fields[0][:]\n for longitude_names in ['lon','long','x']:\n fields = dataset.get_variables_by_attributes(name=longitude_names)\n if len(fields) >= 1:\n break\n if len(fields) == 1:\n longitudes = fields[0][:]\n elif len(fields) > 1:\n raise RuntimeError(\"File {0} contains\"\n \" multiple longitude fields\".format(filename))\n elif len(fields) > 1:\n raise RuntimeError(\"File {0} contains\"\n \" multiple latitude fields\".format(filename))\n if longitudes is not None:\n grid = gd.makeGrid('LatLong',nlat=len(latitudes),nlong=len(longitudes))\n grid.set_latitude_points(np.asarray(latitudes))\n grid.set_longitude_points(np.asarray(longitudes))\n grid_info.append(grid)\n else:\n grid = gd.makeGrid(grid_type,**grid_kwargs)\n fields = None\n if fieldname is None:\n potential_field_names = ['Topo','topo','field_value','orog','z','ICEM',\n 'DEPTO','usurf','bats','slm','FDIR','lsmask',\n 'lake_field','river_flow',\n 'basin_catchment_numbers','rdirs','lsm',\n \"cumulative_flow\",\"catchments\",\n \"cumulative_flow_to_ocean\",\"acc\",\"catch\",\"rdir\"]\n else:\n potential_field_names = [fieldname]\n for potential_field_name in potential_field_names:\n fields = dataset.get_variables_by_attributes(name=potential_field_name)\n if len(fields) >= 1:\n break\n if len(fields) == 1:\n if timeslice is not None:\n field_slice = fields[0][timeslice,:,:]\n else:\n field_slice = fields[0][:]\n if grid_type==\"generic_1d\":\n if unmask:\n return np.asarray(field_slice)\n else:\n return np.asanyarray(field_slice)\n else:\n if unmask:\n return np.asarray(field_slice.reshape(grid.get_grid_dimensions()))\n else:\n return np.asanyarray(field_slice.reshape(grid.get_grid_dimensions()))\n elif len(fields) > 1:\n raise RuntimeError('File {0} contains multiple fields'.format(filename))\n else:\n raise RuntimeError('Field not found in file {0}'.format(filename))",
"def pre_pipeline(self, results):\n results[\"img_prefix\"] = self.img_prefix\n results[\"seg_prefix\"] = self.seg_prefix\n results[\"proposal_file\"] = self.proposal_file\n results[\"bbox_fields\"] = []\n results[\"mask_fields\"] = []\n results[\"seg_fields\"] = []\n results[\"site_fields\"] = []\n results[\"label_fields\"] = []",
"def __init__(self, exposure_time, img_acq_rate, EM_gain, name='iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=None,\n vertical_pixel_shift_speed=0.5e-6, horizontal_pixel_shift_speed=0.1e-6, horizontal_pixel_shift_rate_bits=14,\n frame_transfer=True, crop_mode=False, acquisition_mode='kinetic', triggering='internal', readout_mode='image',\n pixels=512, pixel_size=16e-6):\n self.name = name\n self.img_acq_type = img_acq_type\n\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n\n # supporting camera acquisition settings\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n\n if isinstance(pixels, int):\n self.pixels = (pixels, pixels)\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = (self.pixels[0]*pixel_size, self.pixels[1]*pixel_size)",
"def __init__(self, field_type=None, image=None, field_rect=None, rfid_origin_dg=None, rfid_origin_dg_tag=None, rfid_origin_tag_entry=None, rfid_origin_entry_view=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._field_type = None\n self._image = None\n self._field_rect = None\n self._rfid_origin_dg = None\n self._rfid_origin_dg_tag = None\n self._rfid_origin_tag_entry = None\n self._rfid_origin_entry_view = None\n self.discriminator = None\n\n self.field_type = field_type\n self.image = image\n if field_rect is not None:\n self.field_rect = field_rect\n if rfid_origin_dg is not None:\n self.rfid_origin_dg = rfid_origin_dg\n if rfid_origin_dg_tag is not None:\n self.rfid_origin_dg_tag = rfid_origin_dg_tag\n if rfid_origin_tag_entry is not None:\n self.rfid_origin_tag_entry = rfid_origin_tag_entry\n if rfid_origin_entry_view is not None:\n self.rfid_origin_entry_view = rfid_origin_entry_view",
"def replace_map_image(self, mapframe, col, **kwargs):\n \n #Destroy the existing mapslider if present\n if self.mapslider_list[col]:\n self.mapslider_list[col].destroy()\n self.mapslider_label_list[col].destroy()\n \n #Pull the color fill settings and format them\n fill_color_title = self.translate(self.color_setting_name_list[col].get(),\n input_language=self.language,\n output_language='english')\n if fill_color_title == 'None':\n fill_color = []\n fill_color_title_input = []\n self.mapslider_list[col] = []\n self.datenumlist[col] = []\n self.datefieldlist[col] = []\n self.map_temporalflag[col] = 0\n else:\n fill_color = [self.color_field_modes[self.color_longname_modes_inverted[fill_color_title]]]\n fill_color_title_input = fill_color_title\n \n if fill_color:\n testfield = self.fieldnamelookup(fill_color[0], self.shp_fields)\n vis_params = testfield.vis_params\n \n #Generate a new mapslider if the field is temporal\n if testfield.temporal_flag:\n self.map_temporalflag[col] = testfield.temporal_flag\n if 'slideval' in kwargs:\n slideval = kwargs.pop('slideval')\n else:\n slideval = 0\n fill_color = [self.make_map_slider(self.frame_map_list[col], col, fill_color[0], slideval=slideval)]\n self.date_setting_list[col] = fill_color\n else:\n self.mapslider_list[col] = []\n self.datenumlist[col] = []\n self.datefieldlist[col] = []\n self.map_temporalflag[col] = 0\n else:\n vis_params = [[],[],[]]\n \n #Destroy the existing imgslider if present\n if self.imgslider_list[col]:\n self.imgslider_list[col].destroy()\n self.imgslider_label_list[col].destroy()\n \n #Pull the image visualization settings and format them\n image_title = self.translate(self.image_setting_name_list[col].get(),\n input_language=self.language,\n output_language='english')\n image_path = self.image_dict[image_title]\n \n if image_title == 'None':\n img_params = [[],[],[], [], []]\n self.imgslider_list[col] = [] \n self.img_temporalflag[col] = 0 \n self.img_datenumlist[col] = [] \n self.img_date_setting_list[col] = [] \n new_map_loc = self.map_loc\n else:\n testimg = self.imagenamelookup(image_title, self.image_filepath)\n img_params = testimg.vis_params\n if testimg.lat:\n lat = testimg.lat\n else:\n lat = self.map_loc[0]\n if testimg.lon:\n lon = testimg.lon\n else:\n lon = self.map_loc[1]\n if testimg.zoom:\n zoom = testimg.zoom\n else:\n zoom = self.map_loc[2]\n \n new_map_loc = [lat,lon,zoom]\n \n #Generate a new imgslider if the image is temporal\n if testimg.temporal_flag:\n self.img_temporalflag[col] = testimg.temporal_flag\n if 'img_slideval' in kwargs:\n img_slideval = kwargs.pop('img_slideval')\n else:\n img_slideval = 0\n img_band = self.make_image_slider(self.frame_map_list[col], col, testimg, img_slideval=img_slideval)\n self.img_date_setting_list[col] = img_band\n img_params.append(self.img_date_setting_list[col])\n else:\n self.imgslider_list[col] = []\n self.img_temporalflag[col] = 0 \n self.img_datenumlist[col] = [] \n self.img_date_setting_list[col] = [] \n img_params.append([])\n \n #Delete exisiting map\n self.MAP_list[col].delete(\"all\")\n slaveitems = mapframe.slaves()\n for item in slaveitems:\n item.destroy() \n griditems = mapframe.grid_slaves()\n for item in griditems:\n item.destroy()\n \n #Generate the new map\n self.MAP_list[col] = MapWindow.Map(mapframe,\n self.shps,\n background_image = image_path, \n color_range = fill_color,\n color_title = fill_color_title_input,\n color_params = vis_params,\n image_params = img_params,\n lat_lon_zoom = new_map_loc,\n null_zeros=1,\n window_dimensions = 
[self.screenwidth,self.screenheight])\n self.MAP_list[col].configure(bg='white')",
"def add_image_face():\n\n try:\n img = decode_image(request.files[\"image\"].read())\n except Exception as e:\n log.error(e)\n data = {\"error\": \"Error while loading image\"}\n return jsonify(data), 500\n save_picture = False\n if request.args.get(\"save\") == \"true\":\n save_picture = True\n \n face_img, _ = processor.extract_faces()\n #TODO\n #1. get several images if possible\n #2. save face_img array as picture if save_picture == True\n #3. pipe face_img array to embedder --> embedder needs to be modified to not from a folder, but from array of face_img\n #4. get the embedder result, insert to a pickle object --> can be section ID, or whatever",
"def _extend_run_record_data_field(self, run_idx, run_record_key,\n field_name, field_data):\n\n records_grp = self.h5['{}/{}/{}'.format(RUNS, run_idx, run_record_key)]\n field = records_grp[field_name]\n\n # make sure this is a feature vector\n assert len(field_data.shape) > 1, \\\n \"field_data must be a feature vector with the same number of dimensions as the number\"\n\n # of datase new frames\n n_new_frames = field_data.shape[0]\n\n # check whether it is a variable length record, by getting the\n # record dataset dtype and using the checker to see if it is\n # the vlen special type in h5py\n if h5py.check_dtype(vlen=field.dtype) is not None:\n\n # if it is we have to treat it differently, since it\n # cannot be multidimensional\n\n # if the dataset has no data in it we need to reshape it\n if all([i == 0 for i in field.shape]):\n # initialize this array\n # if it is empty resize it to make an array the size of\n # the new field_data with the maxshape for the feature\n # dimensions\n field.resize( (n_new_frames,) )\n\n # set the new data to this\n for i, row in enumerate(field_data):\n field[i] = row\n\n # otherwise just add the data\n else:\n\n # resize the array but it is only of rank because\n # of variable length data\n field.resize( (field.shape[0] + n_new_frames, ) )\n\n # add each row to the newly made space\n for i, row in enumerate(field_data):\n field[(field.shape[0] - 1) + i] = row\n\n # if it is not variable length we don't have to treat it\n # differently\n else:\n\n # if this is empty we need to reshape the dataset to accomodate data\n if all([i == 0 for i in field.shape]):\n\n # check the feature shape against the maxshape which gives\n # the feature dimensions for an empty dataset\n assert field_data.shape[1:] == field.maxshape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n # if it is empty resize it to make an array the size of\n # the new field_data with the maxshape for the feature\n # dimensions\n feature_dims = field.maxshape[1:]\n field.resize( (n_new_frames, *feature_dims) )\n\n # set the new data to this\n field[0:, ...] = field_data\n\n # otherwise just add the data\n else:\n # append to the dataset on the first dimension, keeping the\n # others the same, these must be feature vectors and therefore\n # must exist\n field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )\n # add the new data\n field[-n_new_frames:, ...] = field_data",
"def setImages( self, event_key, images ):\n print \"event index\",event_key[0]\n self.run = event_key[1]\n self.subrun = event_key[2]\n self.event_num = event_key[3]\n print self.run,self.subrun,self.event_num\n self.images = images\n #print self.images.img_v\n #for img in self.images.img_v:\n # print img.shape\n self.labeltools.setImage( event_key[0], self.images )",
"def _setup_new_image(self):\n\n if not self._viewer is None:\n if not self._input_image is None:\n self._viewer.SetInput(self._input_image)\n else:\n self._viewer.SetInput(self._dummy_image_source.GetOutput())\n\n ii = self._viewer.GetInput()\n \n ii.UpdateInformation()\n ii.Update()\n range = ii.GetScalarRange()\n self._viewer.SetColorWindow(range[1] - range[0])\n self._viewer.SetColorLevel(0.5 * (range[1] + range[0]))\n \n icp = self._view_frame._image_control_panel\n icp.slider.SetRange(self._viewer.GetSliceMin(),\n self._viewer.GetSliceMax())\n icp.slider.SetValue(self._viewer.GetSliceMin())\n \n #self._viewer.UpdateDisplayExtent()\n self._viewer.GetRenderer().ResetCamera()",
"def load_car_data(\n instance_dir, use_optimized_cameras=True, image_size=256, pad_size=0.1\n):\n annotations_json = osp.join(instance_dir, \"annotations.json\")\n with open(annotations_json) as f:\n annotations = json.load(f)\n data_dict = {\n \"bbox\": [], # (N, 4).\n \"crop_scales\": [], # (N,).\n \"image_centers\": [], # (N, 2).\n \"images\": [], # (N, 256, 256, 3).\n \"images_og\": [], # (N, H, W, 3).\n \"initial_poses\": [], # (N, 3, 3).\n \"masks\": [], # (N, 256, 256).\n \"masks_dt\": [], # (N, 256, 256).\n }\n for annotation in annotations[\"annotations\"]:\n filename = osp.join(instance_dir, \"images\", annotation[\"filename\"])\n\n # Make a square bbox.\n bbox = np.array(annotation[\"bbox\"])\n center = ((bbox[:2] + bbox[2:]) / 2.0).astype(int)\n s = (max(bbox[2:] - bbox[:2]) / 2.0 * (1 + pad_size)).astype(int)\n square_bbox = np.concatenate([center - s, center + s])\n\n # Load image and mask.\n image_og = Image.open(filename).convert(\"RGB\")\n mask = Image.fromarray(rle_to_binary_mask(annotation[\"mask\"]))\n\n # Crop image and mask.\n image = image_util.crop_image(image_og, square_bbox)\n image = np.array(image.resize((image_size, image_size), Image.LANCZOS)) / 255.0\n mask = image_util.crop_image(mask, square_bbox)\n mask = np.array(mask.resize((image_size, image_size), Image.BILINEAR)) > 0.5\n image_center, crop_scale = compute_crop_parameters(image_og.size, square_bbox)\n if use_optimized_cameras:\n initial_pose = annotation[\"camera_optimized\"][\"R\"]\n else:\n initial_pose = annotation[\"camera_initial\"][\"R\"]\n data_dict[\"bbox\"].append(square_bbox)\n data_dict[\"crop_scales\"].append(crop_scale)\n data_dict[\"image_centers\"].append(image_center)\n data_dict[\"images\"].append(image)\n data_dict[\"images_og\"].append(image_og)\n data_dict[\"initial_poses\"].append(initial_pose)\n data_dict[\"masks\"].append(mask)\n data_dict[\"masks_dt\"].append(compute_distance_transform(mask))\n for k, v in data_dict.items():\n if k != \"images_og\": # Original images can have any resolution.\n data_dict[k] = np.stack(v)\n return data_dict",
"def __init__(self, raw_facil, raw_gir, raw_geo, proj):\n address1 = raw_facil.get('address1')\n address2 = raw_facil.get('address2')\n\n lon_lat = None\n if raw_geo:\n lon_lat = proj(\n raw_geo['longitude'],\n raw_geo['latitude'],\n inverse=True\n )\n\n self._init_attributes()\n self.source = 'facil-location'\n self.bldg_id = raw_facil['id']\n self.type = 'building'\n self.tags = []\n self.banner_abbreviation = raw_facil.get('abbreviation')\n self.name = raw_facil.get('name')\n self.campus = self._get_pretty_campus(raw_facil.get('campus'))\n self.address = self._get_address(address1, address2)\n self.city = raw_facil.get('city')\n self.state = raw_facil.get('state')\n self.zip = raw_facil.get('zip')\n self.geo_location = self._create_geo_location(\n lon_lat[0] if lon_lat else None,\n lon_lat[1] if lon_lat else None\n )\n self.geometry = self._create_geometry(\n raw_geo['coordinatesType'] if raw_geo else None,\n raw_geo['coordinates'] if raw_geo else None\n )\n self.gir_count = raw_gir['count'] if raw_gir else 0\n self.gir_limit = bool(raw_gir['limit'].strip()) if raw_gir and raw_gir['limit'] else None\n self.gir_locations = raw_gir['all'].strip() if raw_gir else None\n self.arcgis_abbreviation = (\n (raw_geo.get('abbreviation') if raw_geo else None)\n or (raw_gir.get('abbreviation') if raw_gir else None)\n )\n self.relationships = {'services': {'data': []}}\n self.merge = False\n self.open_hours = None\n self.description = None\n self.descriptionHtml = None\n self.images = None\n self.thumbnails = []\n self.website = None\n self.synonyms = None",
"def image(self,v):\n self.set('heightfield.image',v)\n #assert fileExists(environment.makeFilePath(v)), \"Warning: HeightField's image file, {}, not found in images folder.\".format(v) \n return self",
"def __init__(self, images=[], logfile='inspect_raw.info', load_log=True, \n master=None):\n if len(images) == 0:\n print('No images specified')\n return False\n \n if not os.path.exists(images[0]):\n print('First image not found (%s), is path correct?' %(images[0]))\n return False\n \n ##### Add .fits to filename and make backup if necessary\n self.logfile = logfile\n if not self.logfile.lower().endswith('.fits'):\n self.logfile += '.fits'\n \n if os.path.exists(self.logfile):\n bk = glob.glob(self.logfile+'.backup*')\n if len(bk) > 0:\n bkup_file = self.logfile + '.backup.%03d' %(len(bk))\n else:\n bkup_file = self.logfile + '.backup'\n \n shutil.copy(self.logfile, bkup_file)\n print('Made copy of %s -> %s' %(self.logfile, bkup_file))\n \n ####### Initialize parameters\n self.params = {} \n self.images = images\n \n self.marked_reads = None\n self.NREAD = 14\n \n ### Polygons for reads\n x0 = y0 = 12\n px = py = 6\n dx = dy = 241\n xi = np.array([0,1,1,0])\n yi = np.array([0,0,1,1])\n \n c = 0\n self.read_polygons = []\n for j in range(4):\n for i in range(4):\n c += 1\n if c > self.NREAD:\n break\n else:\n polyx = x0+i*(px+dx)+xi*dx\n polyy = y0+j*(py+dy)+yi*dy\n poly = np.array([polyx, polyy]).T\n self.read_polygons.append(mplPath.Path(poly))\n \n if os.path.exists(self.logfile) & load_log:\n self.read_fits()\n \n self.N = len(self.images)\n\n for key in ['satellite', 'earth', 'other', 'kill', 'seen']:\n if key not in self.params.keys():\n self.params[key] = np.zeros(self.N, dtype=np.int)\n \n if self.marked_reads is None:\n self.marked_reads = np.zeros((self.N, self.NREAD), dtype=int)\n \n if 'comment' not in self.params.keys():\n self.params['comment'] = ['---' for i in range(self.N)]\n \n self.i = 0\n self.master = master\n self.setup_gui()",
"def add_base64_files(self, file_dict):\n if self.src_keys is None:\n self.src_keys, self.rgb_cam_list, self.rgb_of_depth_cam_list = init_cam_set(file_dict)\n self.src_keys_dict = {v: i for i, v in enumerate(self.src_keys)}\n logger.info('Init Calibrator done.')\n for k, v in file_dict.items():\n filename = str(10000000 + self.counter)[1:]\n if k.startswith('cam'):\n if 'dept' in k:\n continue\n cam_id = self.src_keys_dict[k]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.jpg')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n # print('calib data copy', v, dst_path)\n # print >> sys.stderr, 'calib data copy', v, dst_path\n with open(dst_path, 'w') as fout:\n fout.write(base64.b64decode(v))\n elif k.startswith('xeye'):\n for i, imgb64 in enumerate(v):\n cam_id = self.src_keys_dict[('xeye_image', i)]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.png')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n if self.resize_xeye:\n with open(self.record_path, 'a') as fout:\n fout.write('resize ' + imgb64 + ' ' + dst_path + '\\n')\n # resize_xeye_image_file(imgpath, dst_path)\n else:\n with open(dst_path, 'w') as fout:\n fout.write(base64.b64decode(imgb64))\n else:\n logger.warn('Unrocognize key: {}'.format(k))\n return\n self.counter += 1",
"def __init__(self,bits):\n self.image_bits = bits",
"def generate_winstonlutz_multi_bb_multi_field(\n simulator: Simulator,\n field_layer: type[Layer],\n dir_out: str,\n field_offsets: list[list[float]],\n bb_offsets: list[list[float]] | list[dict[str, float]],\n field_size_mm: tuple[float, float] = (20, 20),\n final_layers: list[Layer] | None = None,\n bb_size_mm: float = 5,\n image_axes: ((int, int, int), ...) = (\n (0, 0, 0),\n (90, 0, 0),\n (180, 0, 0),\n (270, 0, 0),\n ),\n gantry_tilt: float = 0,\n gantry_sag: float = 0,\n clean_dir: bool = True,\n jitter_mm: float = 0,\n align_to_pixels: bool = True,\n) -> list[str]:\n if not osp.isdir(dir_out):\n os.mkdir(dir_out)\n if clean_dir:\n for pdir, _, files in os.walk(dir_out):\n [os.remove(osp.join(pdir, f)) for f in files]\n file_names = []\n for gantry, coll, couch in image_axes:\n sim_single = copy.copy(simulator)\n for field_offset in field_offsets:\n offset_mm_left = field_offset[0] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_up = field_offset[1] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_in = -field_offset[2] + random.uniform(\n -jitter_mm, jitter_mm\n ) # negative because pixels increase as we go out, so to go in we subtract\n long_offset = bb_projection_long(\n offset_in=offset_mm_in,\n offset_up=offset_mm_up,\n offset_left=offset_mm_left,\n sad=1000,\n gantry=gantry,\n )\n gplane_offset = bb_projection_gantry_plane(\n offset_left=offset_mm_left,\n offset_up=offset_mm_up,\n sad=1000,\n gantry=gantry,\n )\n long_offset += gantry_tilt * cos(gantry)\n gplane_offset += gantry_sag * sin(gantry)\n if align_to_pixels:\n long_offset = pixel_align(sim_single.pixel_size, long_offset)\n gplane_offset = pixel_align(sim_single.pixel_size, gplane_offset)\n sim_single.add_layer(\n field_layer(\n field_size_mm=field_size_mm,\n cax_offset_mm=(long_offset, gplane_offset),\n )\n )\n for offset in bb_offsets:\n if isinstance(offset, dict):\n offset_mm_left = offset[\"offset_left_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n offset_mm_up = offset[\"offset_up_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n offset_mm_in = -offset[\"offset_in_mm\"] + random.uniform(\n -jitter_mm, jitter_mm\n )\n else:\n offset_mm_left = offset[0] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_up = offset[1] + random.uniform(-jitter_mm, jitter_mm)\n offset_mm_in = -offset[2] + random.uniform(\n -jitter_mm, jitter_mm\n ) # negative because pixels increase as we go out, so to go in we subtract\n\n long_offset = bb_projection_long(\n offset_in=offset_mm_in,\n offset_up=offset_mm_up,\n offset_left=offset_mm_left,\n sad=1000,\n gantry=gantry,\n )\n gplane_offset = bb_projection_gantry_plane(\n offset_left=offset_mm_left,\n offset_up=offset_mm_up,\n sad=1000,\n gantry=gantry,\n )\n if align_to_pixels:\n long_offset = pixel_align(sim_single.pixel_size, long_offset)\n gplane_offset = pixel_align(sim_single.pixel_size, gplane_offset)\n sim_single.add_layer(\n PerfectBBLayer(\n cax_offset_mm=(\n long_offset,\n gplane_offset,\n ),\n bb_size_mm=bb_size_mm,\n )\n )\n if final_layers is not None:\n for layer in final_layers:\n sim_single.add_layer(layer)\n file_name = f\"WL G={gantry}, C={coll}, P={couch}; Field={field_size_mm}mm (shifts={field_offsets}); BB={bb_size_mm}mm @ left={offset_mm_left:.2f}, in={offset_mm_in:.2f}, up={offset_mm_up:.2f}; Gantry tilt={gantry_tilt}, Gantry sag={gantry_sag}.dcm\"\n sim_single.generate_dicom(\n osp.join(dir_out, file_name),\n gantry_angle=gantry,\n coll_angle=coll,\n table_angle=couch,\n )\n file_names.append(file_name)\n return file_names",
"def __init__(self, opt):\r\n super().__init__(opt)\r\n\r\n self.image_color = []\r\n for folder in self.annotations.keys():\r\n for image in self.annotations[folder].keys():\r\n img_path = os.path.join(self.root_dir, folder, image)\r\n camera, spec, n = image.split('_')\r\n if camera == 'BB':\r\n continue\r\n else:\r\n if spec == 'color':\r\n self.image_color.append(img_path)\r\n\r\n def sort_priority(x):\r\n *_, folder, name = x.split('/')\r\n folder_n = int(folder[1])\r\n folder_t = folder[2]\r\n name = int(name[0:-4].split('_')[-1])\r\n return folder_n, folder_t, name\r\n\r\n self.image_source, self.image_target = self._get_src_tgt(\r\n opt.augmentation_ratio, self.image_color, sort_priority)",
"def __init__(self, image):\n self.image = image",
"def generate_image_info(im, params):\n im = ee.Image(im)\n\n # some images are scaled to a factor of 10.\n if params.get('scale') == 'log':\n im = im.log10()\n\n im = im.sldStyle(params.get('sld_style'))\n\n m = im.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result"
]
| [
"0.5710676",
"0.5497638",
"0.54184586",
"0.5361319",
"0.5347474",
"0.5263473",
"0.524703",
"0.5237913",
"0.52199036",
"0.5189451",
"0.5172501",
"0.51476794",
"0.51453674",
"0.5102692",
"0.51013",
"0.50968385",
"0.5085219",
"0.5065007",
"0.50641096",
"0.50515676",
"0.50401753",
"0.5034054",
"0.5011783",
"0.5004175",
"0.49955848",
"0.49756604",
"0.49660397",
"0.4962356",
"0.49329928",
"0.49271777"
]
| 0.7162582 | 0 |
return brightest sources (by source type, band) | def get_brightest(self, object_type='star', num_srcs=1, band='r', return_idx=False):
fluxes = np.array([s.params.flux_dict[band] for s in self.srcs])
type_idx = np.where(self.source_types == object_type)[0]
type_fluxes = fluxes[type_idx]
type_idx = type_idx[np.argsort(type_fluxes)[::-1]][:num_srcs]
blist = [self.srcs[i] for i in type_idx]
if return_idx:
return blist, type_idx
else:
return blist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def brightest_source(frq=151, sliced_list=catalog.srcs):\n max_obj = sliced_list[0]\n for gleam_obj in sliced_list:\n if gleam_obj.flux_by_frq[frq] > max_obj.flux_by_frq[frq]:\n max_obj = gleam_obj\n print(\"Largest flux value encountered:\", max_obj.flux_by_frq[frq])\n print(\"Name of associated object:\", max_obj.name)\n print(\"Index of associated object:\", lookup(max_obj.name))\n return max_obj",
"def find_sources(image):\n from scipy import ndimage\n from astropy.stats import mad_std\n\n img1 = image.copy().astype('float32')\n m, s = np.median(image), mad_std(image)\n src_mask = image > m + 3.0 * s\n # set the background to the min value of the sources\n img1[~src_mask] = img1[src_mask].min()\n # this rescales (min,max) to (0,1)\n img1 = (img1.min() - img1) / (img1.min() - img1.max())\n img1[~src_mask] = 0.\n\n def obj_params_with_offset(img, labels, aslice, label_idx):\n y_offset = aslice[0].start\n x_offset = aslice[1].start\n thumb = img[aslice]\n lb = labels[aslice]\n yc, xc = ndimage.center_of_mass(thumb, labels=lb, index=label_idx)\n br = thumb[lb == label_idx].sum() # the intensity of the source\n return [br, xc + x_offset, yc + y_offset]\n\n srcs_labels, num_srcs = ndimage.label(img1)\n\n if num_srcs < 10:\n print(\"WARNING: Only %d sources found.\" % (num_srcs))\n\n # Eliminate here all 1 pixel sources\n all_objects = [[ind + 1, aslice] for ind, aslice\n in enumerate(ndimage.find_objects(srcs_labels))\n if srcs_labels[aslice].shape != (1, 1)]\n lum = np.array([obj_params_with_offset(img1, srcs_labels, aslice, lab_idx)\n for lab_idx, aslice in all_objects])\n\n lum = lum[lum[:, 0].argsort()[::-1]] # sort by brightness descending order\n\n return lum[:, 1:]",
"def getBrightest(source=None, elMin=20, elMax=87, sourceList=[], action=INCLUDE, \n numReturn=1, ignoreNorthSouth=True, coordsys=\"azel\",\n getOptical=False, fluxLimit=1.0, frequency=95) : \n # If source is None then use Polaris as the source as it is always up\n nsource = source \n if source == None: \n nsource = 'aumi'\n ignoreNorthSouth = True \n coordsys = \"azel\" \n # We need to get a bunch and then sort them and truncate after sorting...\n numToGet = 333 \n r = s.getNearest(nsource, elMin, elMax, sourceList, action, numToGet,\n ignoreNorthSouth, coordsys, getOptical, fluxLimit, frequency)\n print \"Number of results before trimming:\", len(r) \n def f(x, y):\n return cmp(y.brightness, x.brightness) \n r.sort(f) \n if source == None: \n for n in r:\n n.reference = \"None\"\n n.distance = 0\n n.azimuth = 0\n n.elevation = 0\n return r[:numReturn]",
"def broadbandfilters(self):\n all = self.allbroadbandfilters\n return [all[layer-1] for layer in self.__layers]",
"def calculate_band(value, bands):\n for band in bands:\n if band > value:\n return band",
"def band_selector(image, colors):\n # convert band to list for downstream compatibilty, if necessary\n if len(colors) == 3: #then it's an RGB image\n\n #housekeeping\n try:\n nbands = len(colors['band'])\n except: \n colors['band'] = [colors['band']]\n nbands = len(colors['band'])\n\n try:\n len(colors['dark_on_light'])\n except:\n colors['dark_on_light'] = [colors['dark_on_light']]\n\n if colors['colorspace'] is 'gray' or colors['colorspace'] is 'grey':\n colors['band'] = [0]\n nbands = 1\n if len(colors['dark_on_light']) > 1:\n raise ValueError(\n \"\"\"Can't interpret multiple arguments for 'dark_on_light' when \n 'colorspace' is {}.\n \"\"\".format(colors['colorspace'])\n )\n \n if nbands != len(colors['dark_on_light']):\n raise ValueError(\n \"\"\"Number of items in `colors['dark_on_light']` doesn't\n equal the number of bands in `colors['band']`!\"\"\"\n )\n\n # convert colorspace if necessary\n try:\n working_image = getattr(color, \"rgb2\" + colors['colorspace'].lower())(image)\n except:\n working_image = image.copy()\n if colors['colorspace'].lower() != 'rgb':\n raise ValueError(\n \"\"\"Didn't recognize specified colorspace. \n See skimage.color.rgb2* for options.\"\"\"\n )\n \n # pull bands\n if len(working_image.shape) == 3: # excludes rgb2gray\n working_image = [img_split(working_image)[i] for i in colors['band']]\n else:\n working_image = [working_image]\n nbands = 1\n \n else: # it's a black and white image\n nbands = 1\n working_image = [image.copy()]\n if len(image.shape) != 2:\n raise ValueError(\n \"\"\"Your `color` argument suggested a grayscale image, but it has \\\n multiple bands!\"\"\"\n )\n \n return(working_image)",
"def calc_source_blend_params(params,log):\n\n source = photometry_classes.Star()\n\n source.fs_g = params['f_s_g']\n source.sig_fs_g = params['sig_f_s_g']\n (source.g, source.sig_g) = flux_to_mag_pylima(source.fs_g,source.sig_fs_g)\n\n source.fs_r = params['f_s_r']\n source.sig_fs_r = params['sig_f_s_r']\n (source.r, source.sig_r) = flux_to_mag_pylima(source.fs_r,source.sig_fs_r)\n\n source.fs_i = params['f_s_i']\n source.sig_fs_i = params['sig_f_s_i']\n (source.i, source.sig_i) = flux_to_mag_pylima(source.fs_i,source.sig_fs_i)\n\n source.compute_colours(use_inst=True)\n source.transform_to_JohnsonCousins()\n\n log.info('\\n')\n log.info('Source measured photometry:')\n log.info(source.summary(show_mags=True))\n log.info(source.summary(show_mags=False,show_colours=True))\n log.info(source.summary(show_mags=False,johnsons=True))\n\n blend = photometry_classes.Star()\n\n blend.fs_g = params['f_b_g']\n blend.sig_fs_g = params['sig_f_b_g']\n (blend.g, blend.sig_g) = flux_to_mag_pylima(blend.fs_g,blend.sig_fs_g)\n\n blend.fs_r = params['f_b_r']\n blend.sig_fs_r = params['sig_f_b_r']\n (blend.r, blend.sig_r) = flux_to_mag_pylima(blend.fs_r,blend.sig_fs_r)\n\n blend.fs_i = params['f_b_i']\n blend.sig_fs_i = params['sig_f_b_i']\n (blend.i, blend.sig_i) = flux_to_mag_pylima(blend.fs_i,blend.sig_fs_i)\n\n blend.compute_colours(use_inst=True)\n blend.transform_to_JohnsonCousins()\n\n log.info('\\n')\n log.info('Blend measured photometry:')\n log.info(blend.summary(show_mags=True))\n log.info(blend.summary(show_mags=False,show_colours=True))\n log.info(blend.summary(show_mags=False,johnsons=True))\n\n return source, blend",
"def find_brightest_biggest(filename, catname=\"sources.cat\", config=\"config.sex\", minsize=10,\n minflux=450000):\n # Get dimensions of the image\n hdulist = pf.open(filename)\n (height, width) = hdulist[0].data.shape\n bloblist = manual_blob_finder(hdulist[0].data)\n hdulist.close()\n\n if bloblist is None:\n return None\n\n sort_ind = np.argsort(bloblist['max'])[::-1]\n blob_ind = None\n for ind in sort_ind:\n if bloblist['width'][ind] >= minsize and bloblist['flux'][ind] > minflux:\n blob_ind = ind\n break\n\n if blob_ind is None:\n return None\n\n return (bloblist['cent_x'][blob_ind] - (float(width) / 2.),\n bloblist['cent_y'][blob_ind] - (float(height) / 2.),\n bloblist['flux'][blob_ind],\n bloblist['width'][blob_ind],\n bloblist['max'][blob_ind])\n\n '''\n # Use sextractor to find blobs.\n # N.B. may be tuning of parameters, but this was mostly unreliable and noisy.\n\n hdulist.close()\n\n # Source extract\n call([\"sextractor\", filename, \"-c\", config, \"-CATALOG_NAME\", catname])\n\n # Load the catalog file\n srclist = pf.open(catname)\n srctable = srclist[2].data\n sort_ind = np.argsort(srctable['FLUX_MAX'])[::-1]\n blob_ind = None\n for ind in sort_ind:\n if (srctable['FLUX_RADIUS'][ind] > minsize and srctable['FLUX_MAX'][ind] > minflux and\n srctable['FLUX_RADIUS'][ind] < maxradius and srctable['FLUX_MAX'][ind] < maxflux):\n blob_ind = ind;\n break\n if blob_ind is None:\n return None\n return (srctable['X_IMAGE'][blob_ind] - (float(width) / 2.), \n srctable['Y_IMAGE'][blob_ind] - (float(height) / 2.),\n srctable['FLUX_MAX'][blob_ind],\n srctable['FLUX_RADIUS'][blob_ind],\n srctable['SNR_WIN'][blob_ind])\n '''",
"def combine(name, sources):\n fluid = ht.ThermState(sources[0].fluid.name, T=ht.T_NTP, P=ht.P_NTP)\n if all([source.fluid.name == fluid.name for source in sources]):\n total_volume = sum([source.volume for source in sources])\n return Source(name, fluid, total_volume)\n else:\n print('\\nAll volumes should contain the same fluid')\n return None",
"def allbroadbandfilters(self):\n return [exposuretimeandbroadbandfilter[1] for exposuretimeandbroadbandfilter in self.__allexposuretimesandbroadbandfilters]",
"def bands(self) -> int:\n ...",
"def bands(self):\n\t\treturn zip((self.primary_threshold, self.upper_earning_limit),\n\t\t\t\t self.rates)",
"def minmax():\n minmaxlist = []\n timelist = []\n #create a list of the filenames of all sentinel-images\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n print(\"STEP 1/2\")\n print(\"EXPORTING MIN AND MAX VALUES PER BAND\")\n for i in s2files:\n start = time.time()\n nlfile = nlpath + \"/\" + i\n s2file = s2path+\"/\"+i\n #open the file\n s2raster = gdal.Open(s2file) \n #iterate over the bands of each image\n for n in range(s2raster.RasterCount):\n f = n + 1\n s2band = s2raster.GetRasterBand(f)\n #read the pixels of the band as an numpy-array\n s2band = s2band.ReadAsArray()\n #resize the bands to have all images in the same size\n s2band = np.resize(s2band,(1050,1050))\n #get the min and max values of each band to be able to 0-1 normalize after\n min = s2band.min()\n max = s2band.max()\n #check if there are already values for the band\n if len(minmaxlist) < s2raster.RasterCount + 1:\n s2minmax = [min,max]\n minmaxlist.append(s2minmax)\n # if the min value of the open band is smaller than the saved minimal value, overwrite it\n if min < minmaxlist[n][0]:\n minmaxlist[n][0] = min\n #if the max value of the open band is higher than the saves maximum value, overwrite it\n if max > minmaxlist[n][1]:\n minmaxlist[n][1] = max\n #open the nightlight img\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n #read the only band of the image as a numpy-array\n nlband = nlband.ReadAsArray()\n #resize it the same way as the sentinel images\n nlband = np.resize(nlband,(1050,1050))\n #get the min and max values of the band\n nlmin = nlband.min()\n nlmax = nlband.max()\n #check if there are already information about min and max values for the nightlight images\n if len(minmaxlist) < s2raster.RasterCount + 1:\n nlminmax = [nlmin,nlmax]\n minmaxlist.append(nlminmax)\n #if the min value of the open nightlight image is smaller than the saved minimal value, overwrite it\n if nlmin < minmaxlist[16][0]:\n minmaxlist[16][0] = nlmin\n #if the max value of the open nightlight image is higher than the saves maximum value, overwrite it\n if nlmax > minmaxlist[16][1]:\n minmaxlist[16][1] = nlmax\n end = time.time()\n timelist.append(end-start)\n print(\"Step 1/2\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. time left:\",time.strftime('%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))\n #throw out the Quality Bands (QA10,QA20,QA60)\n minmaxlist = [i for j,i in enumerate(minmaxlist) if j not in [13,14,15]]\n return minmaxlist",
"def get_rgb(input):\n rgb_band_idxs = [bands.index(b) for b in [\"S2B4\", \"S2B3\", \"S2B2\"]] # could be also hardcoded as [3,2,1]\n return input[rgb_band_idxs]",
"def get_brightest_mean(self, num_pix=3):\n peak_x = np.zeros(\n [len(self.pixel_x)]) # Create blank arrays for peaks\n # rather than a dict (faster)\n peak_y = np.zeros(peak_x.shape)\n peak_amp = np.zeros(peak_x.shape)\n\n # Loop over all tels to take weighted average of pixel\n # positions This loop could maybe be replaced by an array\n # operation by a numpy wizard\n\n tel_num = 0\n for tel in self.image:\n top_index = self.image[tel].argsort()[-1 * num_pix:][::-1]\n print(top_index, self.pixel_x[tel][top_index],\n self.image[tel][top_index])\n weight = self.image[tel][top_index]\n weighted_x = self.pixel_x[tel][top_index] * weight\n weighted_y = self.pixel_y[tel][top_index] * weight\n\n ppx = np.sum(weighted_x) / np.sum(weight)\n ppy = np.sum(weighted_y) / np.sum(weight)\n\n peak_x[tel_num] = ppx # Fill up array\n peak_y[tel_num] = ppy\n peak_amp[tel_num] = np.sum(weight)\n tel_num += 1\n\n self.peak_x = peak_x # * unit # Add to class member\n self.peak_y = peak_y # * unit\n self.peak_amp = peak_amp",
"def broadbandfilter(self):\n _, = self.broadbandfilters\n return _",
"def measure_photometric_source_colours(params,target,log):\n\n log.info('\\n')\n log.info('Attempting to estimate the source colours directly from the photometry')\n\n for f1,f2 in [ ('g','r'), ('g','i'), ('r','i')]:\n\n (source_colour,sig_source_colour,blend_flux, sig_blend_flux,fit) = phot_source_colour.measure_source_colour_odr(target.lightcurves[f1],\n target.lightcurves[f2])\n\n log.info('Fit to '+f2+' vs. '+f1+' flux:')\n log.info('Source colour ('+f1+'-'+f2+') = '+str(source_colour)+' +/- '+str(sig_source_colour))\n log.info('Blend flux in '+f2+': '+str(blend_flux)+' +/- '+str(sig_blend_flux))\n\n setattr(target,'fb_'+f2, blend_flux)\n setattr(target,'sig_fb_'+f2, sig_blend_flux)\n\n plot_file = path.join(params['red_dir'], 'flux_curve_'+f1+'_'+f2+'.eps')\n\n phot_source_colour.plot_bicolour_flux_curves(target.lightcurves[f1],\n target.lightcurves[f2],\n fit,f1,f2,\n plot_file)\n\n try:\n\n #gr = target.fb_g / target.fb_r\n #sig_gr = np.sqrt( (target.sig_fb_g/target.fb_g)**2 + (target.sig_fb_r/target.fb_r)**2 )\n\n #setattr(target,'blend_gr',gr)\n #setattr(target,'blend_sig_gr',sig_gr)\n\n ri = target.fb_r / target.fb_i\n sig_ri = np.sqrt( (target.sig_fb_r/target.fb_r)**2 + (target.sig_fb_i/target.fb_i)**2 )\n\n setattr(target,'blend_ri',ri)\n setattr(target,'blend_sig_ri',sig_ri)\n\n log.info('Blend colours:')\n log.info('(g-r)_b = '+str(target.blend_gr)+' +/- '+str(target.blend_sig_gr))\n log.info('(r-i)_b = '+str(target.blend_ri)+' +/- '+str(target.blend_sig_ri))\n\n except AttributeError:\n\n pass",
"def select_best_frames(self):\n\n # Cycle through all frames. Use the monochrome image for frame ranking.\n for frame in self.frames.frames_mono:\n # Cycle through all quality areas:\n for index_y, quality_area_row in enumerate(self.quality_areas):\n for index_x, quality_area in enumerate(quality_area_row):\n # If the alignment point list of the quality area is non-empty, compute the\n # local contrast.\n if quality_area['alignment_point_indices']:\n [y_low, y_high, x_low, x_high] = quality_area['coordinates']\n quality_area['frame_qualities'].append(\n Miscellaneous.local_contrast(frame[y_low:y_high, x_low:x_high],\n self.configuration.quality_area_pixel_stride))\n\n # For quality areas with alignment points, sort the computed quality ranks in descending\n # order.\n for index_y, quality_area_row in enumerate(self.quality_areas):\n for index_x, quality_area in enumerate(quality_area_row):\n if quality_area['alignment_point_indices']:\n quality_area['best_frame_indices'] = [b[0] for b in sorted(\n enumerate(quality_area['frame_qualities']), key=lambda i: i[1],\n reverse=True)]\n\n # For quality areas without alignment points, use method \"best_frame_indices_in_empty_areas\"\n # to copy ranks from the nearest quality area with alignment points.\n for index_y, quality_area_row in enumerate(self.quality_areas):\n for index_x, quality_area in enumerate(quality_area_row):\n if not quality_area['alignment_point_indices']:\n quality_area['best_frame_indices'] = self.best_frame_indices_in_empty_areas(\n index_y, index_x)",
"def _filter_images(data, hmin):\n #Laziest way to get a circle mask\n fp = CircularAperture((0,0), r=hmin).to_mask().data>.1\n fp = fp.astype(bool)\n\n # Apply maximum filter, flux filter\n filt_image = maximum_filter(data, footprint=fp,\n mode='constant', cval=0)\n origins = product([0,-1], [0,-1])\n max_4sum = np.amax([_conv_origin(data, o) for o in origins], axis=0)\n return(filt_image, max_4sum)",
"def get_bollinger_bands(rm, rstd):\n \n upper_band=rm+2*rstd\n lower_band=rm-2*rstd\n return upper_band, lower_band",
"def get_resample(name: str) -> str:\n\n methods = {\n \"first\":\n \"\"\"\nimport numpy as np\n\ndef first(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.ones(in_ar[0].shape)\n for i in reversed(range(len(in_ar))):\n mask = in_ar[i] == 0\n y *= mask\n y += in_ar[i]\n\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"last\":\n \"\"\"\nimport numpy as np\n\ndef last(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.ones(in_ar[0].shape)\n for i in range(len(in_ar)):\n mask = in_ar[i] == 0\n y *= mask\n y += in_ar[i]\n\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"max\":\n \"\"\"\nimport numpy as np\n\ndef max(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n y = np.max(in_ar, axis=0)\n np.clip(y,0,255, out=out_ar)\n\"\"\",\n \"average\":\n \"\"\"\nimport numpy as np\n\ndef average(in_ar, out_ar, xoff, yoff, xsize, ysize, raster_xsize,raster_ysize, buf_radius, gt, **kwargs):\n div = np.zeros(in_ar[0].shape)\n for i in range(len(in_ar)):\n div += (in_ar[i] != 0)\n div[div == 0] = 1\n \n y = np.sum(in_ar, axis = 0, dtype = 'uint16')\n y = y / div\n \n np.clip(y,0,255, out = out_ar)\n\"\"\"}\n\n if name not in methods:\n raise ValueError(\n \"ERROR: Unrecognized resampling method (see documentation): '{}'.\".\n format(name))\n\n return methods[name]",
"def calc_sources_power(sources, medium):\r\n I_0 = 1.0 / (2.0 * medium.density * medium.speed_of_sound)\r\n S = numpy.sum(sources['Ss'])\r\n W = I_0 * S\r\n return W",
"def image_sources(dim, xs, order, rc):\n sources = np.zeros((number_of_sources(order)+1, 6))\n \"\"\"gain factor of sound source = 1\n number of the last hitted wall = 0\n propagation path = 0, because 0 wall hitted\"\"\"\n sources[0, :] = [xs[0], xs[1], xs[2], 1, 0, 0]\n\n c = 0 # counter to iterate\n r = 1 # variable to write data in the corresponding row\n while c <= number_of_sources(order - 1):\n sq = mirror_source(dim, [sources[c, 0], sources[c, 1],\n sources[c, 2]], sources[c, 3], sources[c, 4], rc,\n sources[c, 5])\n sources[r:r+sq.shape[0], :] = sq\n c += 1\n r += sq.shape[0]\n return(sources)",
"def get_dark():\n\n # -- utilities\n nwav = 872\n nrow = 1600\n ncol = 20\n dpath = \"../../data/middleton/night time vnir full frame\"\n dname = \"full frame 20ms dark_VNIR.raw\"\n fname = os.path.join(dpath,dname)\n\n # -- read the file\n raw = 1.0*np.fromfile(open(fname,'rb'),np.uint16 \\\n ).reshape(ncol,nwav,nrow \\\n )[:,:,::-1].transpose(1,2,0)\n\n # -- take the mean spectrum of the upper and lower half and smooth\n upper = raw[:,:800,:].mean(-1).mean(-1)\n lower = raw[:,800:,:].mean(-1).mean(-1)\n\n smoff = [sm.nonparametric.lowess(upper,\n np.arange(len(upper)),frac=0.2)[:,1], \n sm.nonparametric.lowess(lower,\n np.arange(len(lower)),frac=0.2)[:,1]]\n\n return smoff, raw",
"def _get_max_sampled_bandit(self)->Bandit:\n estimates = []\n for bandit in self.bandits:\n Qth = np.random.normal(loc =self.mu[bandit.id], scale = self.var[bandit.id])\n f_hat = self.mu[bandit.id]#computing moving_average here \n estimates.append(max(Qth, f_hat))\n return self.bandits[np.argmax(estimates)]",
"def bands(self):\n\t\treturn self._bands",
"def min_max_match(ras, decs, width, DES_box=False):\n\n import numpy as np\n\n print(\"Finding max and mins:\", len(ras), 'sources')\n ra_min = str(np.min(ras) - width)\n ra_max = str(np.max(ras) + width)\n dec_min = str(np.min(decs) - width)\n dec_max = str(np.max(decs) + width)\n if float(ra_min) < 3.0 and float(ra_max) > 357.0:\n ra_min = str(0.0)\n ra_max = str(360.0)\n if DES_box:\n ra_min = str(np.median(ras) - 0.5)\n ra_max = str(np.median(ras) + 0.5)\n dec_min = str(np.median(decs) - 0.6)\n dec_max = str(np.median(decs) + 0.6)\n\n return ra_min, ra_max, dec_min, dec_max",
"def flatcombine(dir='Flats/*/dark_subtracted/'):\n\n for d in glob(dir):\n\n directory = \"/\".join(d.split('/')[0:2]) + '/swarped'\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n keys = ['OBJECT', 'CAMNAME', 'FWINAME', 'ITIME', 'OBSDATE', 'FLSPECTR', 'HISTORY']\n images = ImageFileCollection(d, keywords=keys, glob_include='d*.fits')\n\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='on', camera='narrow', \n done='Dark Subtracted', output='cKNarrowLampOnH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='off', camera='narrow', \n done='Dark Subtracted', output='cKNarrowLampOffH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='on', camera='wide', done='Dark Subtracted',\n output='cKWideLampOnH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='H', lamp='off', camera='wide', done='Dark Subtracted',\n output='cKWideLampOffH', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks', lamp='on', camera='narrow', done='Dark Subtracted',\n output='cKNarrowLampOnKs', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks', lamp='off', camera='narrow', done='Dark Subtracted',\n output='cKNarrowLampOffKs', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks', lamp='on', camera='wide', done='Dark Subtracted',\n output='cKWideLampOnKs', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks', lamp='off', camera='wide', done='Dark Subtracted',\n output='cKWideLampOffKs', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='on', camera='narrow', done='Dark Subtracted',\n output='cNarrowLampOnJ', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='off', camera='narrow', done='Dark Subtracted',\n output='cKNarrowLampOffJ', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='on', camera='wide', done='Dark Subtracted',\n output='cKWideLampOnJ', type='PIXEL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp='off', camera='wide', done='Dark Subtracted',\n output='cKWideLampOffJ', type='PIXEL')",
"def get_vrt_band_list():\n logger.debug('get_vrt_band_list() called')\n vrt_band_list = []\n#===============================================================================\n# sensor_dict = self.bands[tile_type_id][(dataset_info['satellite_tag'], dataset_info['sensor_name'])]\n# # log_multiline(logger.debug, sensor, 'Sensor', '\\t')\n# for file_number in sorted(sensor_dict.keys()):\n# band_info = sensor_dict[file_number]\n# if band_info['level_name'] == 'NBAR':\n# dataset_dir = dataset_info['nbar_dataset_path']\n# dataset_id = dataset_info['nbar_dataset_id']\n# processing_level = dataset_info['nbar_level_name']\n# nodata_value = dataset_info['nbar_nodata_value']\n# resampling_method = dataset_info['nbar_resampling_method']\n# elif band_info['level_name'] == 'ORTHO':\n# dataset_dir = dataset_info['l1t_dataset_path']\n# dataset_id = dataset_info['l1t_dataset_id']\n# processing_level = dataset_info['l1t_level_name']\n# nodata_value = dataset_info['l1t_nodata_value']\n# resampling_method = dataset_info['l1t_resampling_method']\n# else:\n# continue # Ignore any pan-chromatic and derived bands\n# \n# dataset_dir = os.path.join(dataset_dir, 'scene01')\n# filename = find_file(dataset_dir, band_info['file_pattern'])\n# vrt_band_list.append({'file_number': band_info['file_number'], \n# 'filename': filename, \n# 'name': band_info['band_name'],\n# 'dataset_id': dataset_id,\n# 'band_id': band_info['band_id'],\n# 'processing_level': processing_level,\n# 'nodata_value': nodata_value,\n# 'resampling_method': resampling_method,\n# 'tile_layer': band_info['tile_layer']})\n#===============================================================================\n \n #TODO: Make this able to handle multiple derived layers\n for band_level in ['FC']:\n derived_bands = self.bands[tile_type_id][('DERIVED', band_level)]\n for file_number in sorted(derived_bands.keys()):\n band_info = derived_bands[file_number]\n file_pattern = band_info['file_pattern']\n dataset_dir = os.path.join(dataset_info['fc_dataset_path'], 'scene01')\n dataset_id = dataset_info['fc_dataset_id']\n filename = find_file(dataset_dir, file_pattern) \n processing_level = dataset_info['fc_level_name']\n nodata_value = dataset_info['fc_nodata_value'] # Should be None for FC\n resampling_method = dataset_info['fc_resampling_method']\n vrt_band_list.append({'file_number': None, \n 'filename': filename, \n 'name': band_info['band_name'],\n 'dataset_id': dataset_id,\n 'band_id': band_info['band_id'],\n 'processing_level': processing_level,\n 'nodata_value': nodata_value,\n 'resampling_method': resampling_method,\n 'tile_layer': 1})\n \n log_multiline(logger.debug, vrt_band_list, 'vrt_band_list = %s', '\\t')\n return vrt_band_list",
"def sim12_g_reference(datafiles, tolerances):\n catalog = Table.read(datafiles / 'sim12' / 'ref' / 'sim12_g_reference.fits')\n bright_filter = catalog['FLUX_ISO'] / catalog['FLUXERR_ISO'] >= tolerances['signal_to_noise']\n return catalog[bright_filter]"
]
| [
"0.6607561",
"0.6164541",
"0.6067196",
"0.57345235",
"0.5640365",
"0.5539922",
"0.55218196",
"0.54267144",
"0.541707",
"0.53260505",
"0.528097",
"0.5242749",
"0.5182124",
"0.5177732",
"0.51603484",
"0.5146035",
"0.5126072",
"0.5113353",
"0.5108828",
"0.50990474",
"0.50806177",
"0.5071445",
"0.5061005",
"0.50497437",
"0.5039073",
"0.5018404",
"0.5005536",
"0.49834508",
"0.49731225",
"0.49712417"
]
| 0.71598154 | 0 |
resample photons store sourcespecific images | def resample_photons(self, srcs, verbose=False):
# first, clear out old sample images
for src in srcs:
src.clear_sample_images()
# generate per-source sample image patch for each fits image in
# this field. keep track of photons due to noise
noise_sums = {}
for band, img in self.img_dict.iteritems():
if verbose:
print " ... resampling band %s " % band
samp_imgs, noise_sum = \
cel_mcmc.sample_source_photons_single_image_cython(
img, [s.params for s in srcs]
)
# tell each source to keep track of it's source-specific sampled
# images (and the image it was stripped out of)
for src, samp_img in zip(srcs, samp_imgs):
if samp_img is not None:
# cache pixel grid for each sample image
y_grid = np.arange(samp_img.y0, samp_img.y1, dtype=np.float)
x_grid = np.arange(samp_img.x0, samp_img.x1, dtype=np.float)
xx, yy = np.meshgrid(x_grid, y_grid, indexing='xy')
pixel_grid = np.column_stack((xx.ravel(order='C'), yy.ravel(order='C')))
src.sample_image_list.append((samp_img, img, pixel_grid))
# keep track of noise sums
noise_sums[band] = noise_sum
# resample noise parameter in each fits image
for band, img in self.img_dict.iteritems():
a_n = self.a_0 + noise_sums[band]
b_n = self.b_0 + img.nelec.size
#eps_tmp = img.epsilon
img.epsilon = np.random.gamma(a_n, 1./b_n) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Resampler(name):\n\n def resample_average(path, dsquery, dstile, image_format):\n for i in range(1, dstile.RasterCount+1):\n res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i), \"average\")\n if res != 0:\n raise ImageOutputException(\"RegenerateOverview() failed with error %d\" % res)\n\n gdal_write(path, dstile, image_format)\n\n def resample_antialias(path, dsquery, dstile, image_format):\n querysize = dsquery.RasterXSize\n tilesize = dstile.RasterXSize\n\n array = numpy.zeros((querysize, querysize, 4), numpy.uint8)\n for i in range(dstile.RasterCount):\n array[:,:,i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i+1), 0, 0, querysize, querysize)\n im = Image.fromarray(array, 'RGBA') # Always four bands\n im1 = im.resize((tilesize,tilesize), Image.ANTIALIAS)\n\n if os.path.exists(path):\n im0 = Image.open(path)\n im1 = Image.composite(im1, im0, im1)\n\n ensure_dir_exists(path)\n\n if image_format == \"JPEG\":\n im1.save(path, image_format, quality=jpeg_quality)\n else:\n im1.save(path, image_format)\n\n\n if name == \"average\":\n return resample_average\n elif name == \"antialias\":\n return resample_antialias\n\n resampling_methods = {\n \"near\" : gdal.GRA_NearestNeighbour,\n \"bilinear\" : gdal.GRA_Bilinear,\n \"cubic\" : gdal.GRA_Cubic,\n \"cubicspline\" : gdal.GRA_CubicSpline,\n \"lanczos\" : gdal.GRA_Lanczos\n }\n\n resampling_method = resampling_methods[name]\n\n def resample_gdal(path, dsquery, dstile, image_format):\n querysize = dsquery.RasterXSize\n tilesize = dstile.RasterXSize\n\n dsquery.SetGeoTransform( (0.0, tilesize / float(querysize), 0.0, 0.0, 0.0, tilesize / float(querysize)) )\n dstile.SetGeoTransform( (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) )\n\n res = gdal.ReprojectImage(dsquery, dstile, None, None, resampling_method)\n if res != 0:\n raise ImageOutputException(\"ReprojectImage() failed with error %d\" % res)\n\n gdal_write(path, dstile, image_format)\n\n return resample_gdal",
"def getimgs():",
"def nn_resample_image(image_path, pxl_box_width, pxl_box_height):\n image = Image.open(image_path)\n if image.format == \"JPEG\" or image.format == \"PNG\":\n __nn_resample_png_jpeg_image(image, pxl_box_width, pxl_box_height)\n elif image.format == \"GIF\":\n __nn_resample_gif_image(image, pxl_box_width, pxl_box_height)\n\n return image",
"def resample(self):\n pass",
"def resampled_to_img(self, target_image, interpolation=None):\n # IMPORTANT: Polymorphism can be implemented by walking the \n # MRO and finding a method that does not raise\n # NotImplementedError. \n raise NotImplementedError",
"def resample_img(img, img_type, size, spacing):\n resampler = sitk.ResampleImageFilter()\n resampler.SetOutputDirection(img.GetDirection())\n resampler.SetOutputOrigin(img.GetOrigin())\n resampler.SetOutputSpacing(spacing)\n resampler.SetSize(size)\n if img_type is \"Label\":\n resampler.SetInterpolator(sitk.sitkNearestNeighbor)\n elif img_type is \"Image\":\n resampler.SetInterpolator(sitk.sitkLinear)\n imgResampled = resampler.Execute(img)\n\n #axis have to be switched since np.array and keras use them in different order...\n x = np.transpose(sitk.GetArrayFromImage(imgResampled).astype(dtype=np.float), [2, 1, 0])\n return x",
"def resample(image, flow):\r\n assert flow.shape[1] == 2\r\n b, c, h, w = image.shape\r\n grid = get_grid(b, (h, w))\r\n flow = L.concat([flow[:, 0:1, :, :] / ((w - 1.0) / 2.0),\r\n flow[:, 1:2, :, :] / ((h - 1.0) / 2.0)], 1)\r\n final_grid = L.transpose((grid + flow), (0, 2, 3, 1))\r\n image.stop_gradient = False\r\n try:\r\n output = nn.functional.grid_sample(image, final_grid, mode='bilinear', padding_mode='border', align_corners=True)\r\n except Exception:\r\n output = nn.functional.grid_sample(image, final_grid, mode='bilinear', padding_mode='border')\r\n \r\n return output\r\n # return image\r\n # return L.zeros_like(image)\r",
"def resample_by(image_array, compression_factor_list, is_seg=False):\n original_dims_in_pixels = [image_array.shape[d]\n for d in range(len(image_array.shape))]\n target_pixel_dims_list = [int(math.floor(compression_factor_list[d] * original_dims_in_pixels[d]))\n for d in range(len(image_array.shape))]\n\n resized_image = resample(image_array, target_pixel_dims_list, is_seg)\n\n return(resized_image)",
"def change_resolution(img):\n scale_factor = np.random.choice(list(range(0, 6, 2)))\n if scale_factor == 0:\n return img\n downsample = nn.AvgPool2d(scale_factor)\n upsample = nn.UpsamplingNearest2d(scale_factor=scale_factor)\n new_res_img = upsample(downsample(img.unsqueeze(dim=1))).squeeze()\n return new_res_img",
"def process_files(self):\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))",
"def resample_sliprate(self, dt, nsamp):\n for ps in self.pointsources:\n ps.resample_sliprate(dt, nsamp)",
"def _prepareImage(self):\n painter = QPainter(self)\n if len(self.thumbs) == 0:\n return\n destwidth = self.width()\n division = len(self.thumbs)\n NF = division\n slit_width = destwidth // division + 1\n if slit_width < self.minwidth:\n slit_width = self.minwidth\n division = destwidth // slit_width - 1\n for slit in range(division):\n point = QPoint(slit*destwidth // division,0)\n i = slit*NF // division\n thumb = self.transformer(self.thumbs[i])\n w = thumb.width()\n h = thumb.height()\n if w > slit_width:\n w0 = (w-slit_width)//2\n cropped = thumb.copy(w0,0,slit_width,h)\n painter.drawImage(point, cropped)\n else:\n painter.drawImage(point, thumb)",
"def transform_images(img1,img2):",
"def prepare_images(self):\n\n qt_original_image = self.convert_image_to_QTformat(self.original_image)\n self.send_original_photo_to_gui.emit(qt_original_image)\n\n self.processed_image = self.procces_image(self.original_image)\n qt_processed_image = self.convert_image_to_QTformat(self.processed_image)\n self.send_processed_photo_to_gui.emit(qt_processed_image)",
"def __augmented_images(self, info, start):\n count = start\n final_img_to_save = []\n for pair in info:\n processedImage = self.__processImage(os.path.join(WORKING_DIR, pair[0]))\n if processedImage == None:\n continue\n # translation is not that important since CNNs are resistant to image translations\n rotatedImages = self.__applyRotations(processedImage)\n\n rotCount = 1\n for img in rotatedImages:\n filename = str(count) + \"_\" + str(rotCount) + \".jpg\"\n # img.save(os.path.join(directory, filename))\n final_img_to_save.append((img, pair[1], filename))\n rotCount += 1\n\n print(\"Augmenting image: {:05}\".format(count))\n count += 1\n return final_img_to_save",
"def test_nib_resample_image_3d_to_dest(fake_3dimage_nib, fake_3dimage_nib_big):\n img_r = resampling.resample_nib(fake_3dimage_nib, image_dest=fake_3dimage_nib_big, interpolation='linear')\n assert img_r.get_data().shape == (29, 39, 19)\n assert img_r.get_data()[4, 4, 4] == 1.0",
"def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = get_stamps(full_image, Args)\n return image",
"def resample(self):\n self.variational_strategy.resample()",
"def create_preset_images(self):\n for f in sorted(self.get_files_from_data()):\n photoInstances = {}\n for preset in self.generator.settings[\"GALLERY_PRESETS\"]:\n preset_dir = \"%s%s%s\" % (self.absolute_output_path,\n os.sep, \n preset[\"name\"])\n photoInstances[preset[\"name\"]] = Photo(self, f, preset_dir, preset)\n \n self.photos.append(photoInstances)",
"def resample_filters(self):\n a = len(self.filtered_ids)\n b = len(self.orig_image_ids)\n imbalance_ratio = a / b\n min_ratio = 0.5\n if imbalance_ratio > min_ratio:\n return\n minr = min_ratio\n num_req = int((minr * b - a) / (1 - minr))\n new_ids = ((num_req) // a) * self.filtered_ids\n if num_req % a != 0:\n some_more = random.sample(self.filtered_ids, k=(num_req % a))\n new_ids += some_more\n self.image_ids = self.orig_image_ids + new_ids\n print(\"Resampled total:\", len(self.image_ids))",
"def transform_single_imgs(\n self, imgs, confounds=None, sample_mask=None, copy=True\n ):\n raise NotImplementedError()",
"def imgResample(img, spacing, size=[], useNearest=False, origin=[], outsideValue=0):\n if len(spacing) != img.GetDimension(): raise Exception(\"len(spacing) != \" + str(img.GetDimension()))\n\n # Set Size\n if size == []:\n inSpacing = img.GetSpacing()\n inSize = img.GetSize()\n size = [int(math.ceil(inSize[i]*(inSpacing[i]/spacing[i]))) for i in range(img.GetDimension())]\n else:\n if len(size) != img.GetDimension(): raise Exception(\"len(size) != \" + str(img.GetDimension()))\n \n if origin == []:\n origin = [0]*img.GetDimension()\n else:\n if len(origin) != img.GetDimension(): raise Exception(\"len(origin) != \" + str(img.GetDimension()))\n \n # Resample input image\n interpolator = [sitk.sitkLinear, sitk.sitkNearestNeighbor][useNearest]\n identityTransform = sitk.Transform()\n identityDirection = list(sitk.AffineTransform(img.GetDimension()).GetMatrix())\n\n return sitk.Resample(img, size, identityTransform, interpolator, origin, spacing, identityDirection, outsideValue)",
"def sampling():\n # make directory for street images\n streetImageOutputFolder = CONFIG[\"sampling\"][\"streetImageOutputFolder\"]\n makeDirectory(streetImageOutputFolder)\n\n # Get preprocessed point data\n intersectionPointFile = CONFIG[\"shapefile\"][\"intersectoinPointFile\"]\n pointInfoFile = CONFIG[\"shapefile\"][\"pointInfoFilename\"]\n\n pointInfo = readPointFile(pointInfoFile)\n intersectionPointInfo = readIntersectionPointInfo(intersectionPointFile)\n\n # Filter point data that has street images taken within the specified period.\n maxYear = CONFIG[\"gmap\"][\"streetImageMaxYear\"]\n minYear = CONFIG[\"gmap\"][\"streetImageMinYear\"]\n filteredPoints = filterPointByYear(pointInfo, maxYear, minYear)\n\n IMG_NAME_COL_NUM = 5\n LAT_LNG_COL_NUM = 2\n\n # Sample street images, the return is list of sample info\n sampleNum = CONFIG[\"sampling\"][\"sampleNum\"]\n initImageNumber = CONFIG[\"sampling\"][\"initImageNumber\"]\n sampleData = sampleAndDownloadStreetImage(filteredPoints, sampleNum, initImageNumber, initImageNumber, streetImageOutputFolder, intersectionPointInfo)\n imageNames = [streetImageOutputFolder + \"/\" + data[IMG_NAME_COL_NUM] for data in sampleData]\n links = GDriveUpload(imageNames, \"Sampled_Image\")\n\n for i in xrange(len(sampleData)):\n imageName = streetImageOutputFolder + \"/\" + sampleData[i][IMG_NAME_COL_NUM]\n sampleData[i].append(links[imageName])\n\n columnTitle = [\"Sample Number\", \"Sampled Point Number\", \"Latitude + Longitude\", \"Heading\", \"Date\", \"Image Name\", \"Road Types\", \"Web Link\"]\n sampleData.insert(0, columnTitle)\n\n # output to csv file\n outputCSV(sampleData, CONFIG[\"sampling\"][\"csvFilename\"])\n\n # plot images map\n sampledPoints = set([divideGPS(d[LAT_LNG_COL_NUM]) for d in sampleData[1:]])\n plotSampledPointMap(list(sampledPoints), CONFIG[\"sampling\"][\"sampledPointsMapFilename\"])",
"def sample_images(self, epoch):\n synth_data = self.generator(self.constNoise)\n utils.vector_to_img(synth_data, \"./outputs/trial{}/gan{}/epoch{}.jpg\".format(self.trial, self.id, epoch))",
"def update_image(self):\n if self.filenames:\n pos = self.slider.value()\n proj, flat, dark, theta = dx.read_aps_32id(self.filenames, proj=(pos, pos+1))\n if self.ffc_correction:\n image = proj[0,:,:].astype(np.float)/flat[0,:,:].astype(np.float)\n else:\n image = proj[0,:,:].astype(np.float)\n self.image_item.setImage(image)",
"def image_resample(f, oversamp=1.0):\n img = imageio.imread(f)\n x = np.linspace(0,img.shape[0],img.shape[0])\n y = np.linspace(0,img.shape[1],img.shape[1])\n #\n # 2x oversample the image since we'll dither it.\n #\n xnew = np.linspace(0, img.shape[0], img.shape[0]*oversamp)\n ynew = np.linspace(0, img.shape[1], img.shape[1]*oversamp)\n from scipy import interpolate\n rc = interpolate.interp2d(x, y, img[:,:,0].flatten(), kind='linear')\n gc = interpolate.interp2d(x, y, img[:,:,1].flatten(), kind='linear')\n bc = interpolate.interp2d(x, y, img[:,:,2].flatten(), kind='linear')\n rgb_new = np.stack([rc(xnew.flatten(), ynew.flatten()),\n gc(xnew.flatten(), ynew.flatten()),\n bc(xnew.flatten(), ynew.flatten())],-1).transpose(1,0,2).astype(np.uint8)\n plt.imshow(rgb_new)\n return rgb_new",
"def images(self, **kwargs):\n\n raise NotImplementedError",
"def ImageOutput(name, out_ds, tile_size, resampling, init_dest, output_dir, verbose,mbtiles):\n\n resampler = Resampler(resampling)\n\n if name == \"hybrid\":\n return HybridImageOutput(out_ds, tile_size, resampler, init_dest, output_dir, verbose)\n\n if name == \"png\":\n image_format = \"PNG\"\n elif name == \"jpeg\":\n image_format = \"JPEG\"\n\n return SimpleImageOutput(out_ds, tile_size, resampler, init_dest, output_dir, verbose, [image_format],mbtiles)",
"def exportImg(self):\n if self.superSampling:\n print(\"Exporting with size adjusted\")\n self.img = self.img.resize((int(self.width/2),int(self.height/2)),Image.NEAREST)\n self.img.save(self.fileName,\"PNG\")",
"def make_model_sources_image(shape, model, source_table, oversample=1):\n image = np.zeros(shape, dtype=float)\n yidx, xidx = np.indices(shape)\n\n params_to_set = []\n for param in source_table.colnames:\n if param in model.param_names:\n params_to_set.append(param)\n\n # Save the initial parameter values so we can set them back when\n # done with the loop. It's best not to copy a model, because some\n # models (e.g., PSF models) may have substantial amounts of data in\n # them.\n init_params = {param: getattr(model, param) for param in params_to_set}\n\n try:\n for source in source_table:\n for param in params_to_set:\n setattr(model, param, source[param])\n\n if oversample == 1:\n image += model(xidx, yidx)\n else:\n image += discretize_model(model, (0, shape[1]),\n (0, shape[0]), mode='oversample',\n factor=oversample)\n finally:\n for param, value in init_params.items():\n setattr(model, param, value)\n\n return image"
]
| [
"0.6751509",
"0.60750306",
"0.601508",
"0.60111654",
"0.59917426",
"0.58809173",
"0.584998",
"0.5743221",
"0.57146",
"0.569997",
"0.56717074",
"0.5639599",
"0.5603953",
"0.55748373",
"0.55677515",
"0.55628645",
"0.55563945",
"0.5553056",
"0.55517864",
"0.55487037",
"0.55455303",
"0.55383873",
"0.55382377",
"0.5523729",
"0.55073816",
"0.5498201",
"0.5474303",
"0.5468119",
"0.5465306",
"0.54613715"
]
| 0.6748826 | 1 |
Given an initial source, "source" with a bounding box, find all sources in source_list where their bounding box intersects with "source"'s bounding box. Collect all sources that contribute to this source's background model image | def get_active_sources(source, source_list, image):
def intersect(sa, sb, image):
xlima, ylima = sa.bounding_boxes[image]
xlimb, ylimb = sb.bounding_boxes[image]
widtha, heighta = xlima[1] - xlima[0], ylima[1] - ylima[0]
widthb, heightb = xlimb[1] - xlimb[0], ylimb[1] - ylimb[0]
return (np.abs(xlima[0] - xlimb[0])*2 < (widtha + widthb)) and \
(np.abs(ylima[0] - ylimb[0])*2 < (heighta + heightb))
return [s for s in source_list if intersect(s, source, image) and s is not source] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_sources(image):\n from scipy import ndimage\n from astropy.stats import mad_std\n\n img1 = image.copy().astype('float32')\n m, s = np.median(image), mad_std(image)\n src_mask = image > m + 3.0 * s\n # set the background to the min value of the sources\n img1[~src_mask] = img1[src_mask].min()\n # this rescales (min,max) to (0,1)\n img1 = (img1.min() - img1) / (img1.min() - img1.max())\n img1[~src_mask] = 0.\n\n def obj_params_with_offset(img, labels, aslice, label_idx):\n y_offset = aslice[0].start\n x_offset = aslice[1].start\n thumb = img[aslice]\n lb = labels[aslice]\n yc, xc = ndimage.center_of_mass(thumb, labels=lb, index=label_idx)\n br = thumb[lb == label_idx].sum() # the intensity of the source\n return [br, xc + x_offset, yc + y_offset]\n\n srcs_labels, num_srcs = ndimage.label(img1)\n\n if num_srcs < 10:\n print(\"WARNING: Only %d sources found.\" % (num_srcs))\n\n # Eliminate here all 1 pixel sources\n all_objects = [[ind + 1, aslice] for ind, aslice\n in enumerate(ndimage.find_objects(srcs_labels))\n if srcs_labels[aslice].shape != (1, 1)]\n lum = np.array([obj_params_with_offset(img1, srcs_labels, aslice, lab_idx)\n for lab_idx, aslice in all_objects])\n\n lum = lum[lum[:, 0].argsort()[::-1]] # sort by brightness descending order\n\n return lum[:, 1:]",
"def find_sources(image, vignette=3,vignette_rectangular=1., cutouts=None,sigma_threshold_for_source_detection=5, only_rectangle=None, FWHM=4):\n #find sources\n #bkg_sigma = mad_std(image)\n mean, median, std = sigma_clipped_stats(image, sigma=3.0)\n image = copy.copy(image)\n # #only search sources in a circle with radius <vignette>\n if(vignette < 3):\n sidelength = np.max(image.shape)\n x = np.arange(0, image.shape[1])\n y = np.arange(0, image.shape[0])\n vignette = vignette * sidelength/2\n mask = (x[np.newaxis,:]-sidelength/2)**2 + (y[:,np.newaxis]-sidelength/2)**2 < vignette**2\n image[~mask] = median\n\n #ignore a fraction of the image at the corner\n if(vignette_rectangular < 1.):\n sidelength_x = image.shape[1]\n sidelength_y = image.shape[0]\n cutoff_left = (1.-vignette_rectangular)*sidelength_x\n cutoff_right = vignette_rectangular*sidelength_x\n cutoff_bottom = (1.-vignette_rectangular)*sidelength_y\n cutoff_top = vignette_rectangular*sidelength_y\n x = np.arange(0, image.shape[1])\n y = np.arange(0, image.shape[0])\n left = x[np.newaxis,:] > cutoff_left \n right = x[np.newaxis,:] < cutoff_right\n bottom = y[:,np.newaxis] > cutoff_bottom\n top = y[:,np.newaxis] < cutoff_top\n mask = (left*bottom)*(right*top)\n image[~mask] = median\n\n \n #cut out rectangular regions of the image, [(xstart, xend, ystart, yend)]\n if(cutouts != None):\n sidelength = np.max(image.shape)\n x = np.arange(0, image.shape[1])\n y = np.arange(0, image.shape[0])\n for cutout in cutouts:\n print(\"cutting out {}\".format(cutout))\n left = x[np.newaxis,:] > cutout[0] \n right = x[np.newaxis,:] < cutout[1] \n bottom = y[:,np.newaxis] > cutout[2]\n top = y[:,np.newaxis] < cutout[3]\n mask = (left*bottom)*(right*top)\n image[mask] = median\n\n #use only_rectangle within image format: (xstart, xend, ystart, yend)\n if(only_rectangle != None):\n sidelength = np.max(image.shape)\n x = np.arange(0, image.shape[1])\n y = np.arange(0, image.shape[0])\n print(\"only using {}\".format(only_rectangle))\n left = x[np.newaxis,:] > only_rectangle[0] \n right = x[np.newaxis,:] < only_rectangle[1] \n bottom = y[:,np.newaxis] > only_rectangle[2]\n top = y[:,np.newaxis] < only_rectangle[3]\n mask = (left*bottom)*(right*top)\n image[~mask] = median\n\n #daofind = DAOStarFinder(fwhm=4., threshold=5.*std, brightest=200)\n #daofind = DAOStarFinder(fwhm=7., threshold=0.6, brightest=400 )\n daofind = DAOStarFinder(fwhm=FWHM, threshold=sigma_threshold_for_source_detection *std, brightest=s.N_BRIGHTEST_SOURCES )\n if(s.DETECTION_ABSOLUTE_THRESHOLD is not None):\n daofind = DAOStarFinder(fwhm=FWHM, threshold=s.DETECTION_ABSOLUTE_THRESHOLD, brightest=s.N_BRIGHTEST_SOURCES )\n\n\n\n\n sources = daofind(image)\n for col in sources.colnames:\n sources[col].info.format = '%.8g' # for consistent table output\n\n #changed order of positions to [(x,y), (x,y),...] for compatibility with photutils 1.4\n xcenters = np.array(sources['xcentroid'])\n ycenters = np.array(sources['ycentroid'])\n positions = [(xcenters[i], ycenters[i]) for i in range(len(xcenters))]\n apertures = CircularAperture(positions, r=4.)\n phot_table = aperture_photometry(image, apertures)\n for col in phot_table.colnames:\n phot_table[col].info.format = '%.8g' # for consistent table output\n\n observation = Table(phot_table).to_pandas()\n\n #through out candidates where the star finder messed up\n observation = observation.query(\"aperture_sum > \"+str(5*std))\n return observation",
"def _sourceBoundingBox(self, source, width, height):\n pos = source.get('position')\n bbox = {'left': 0, 'top': 0, 'right': width, 'bottom': height}\n if not pos:\n return bbox\n x0, y0, x1, y1 = 0, 0, width, height\n if 'crop' in pos:\n x0 = min(max(pos['crop'].get('left', x0), 0), width)\n y0 = min(max(pos['crop'].get('top', y0), 0), height)\n x1 = min(max(pos['crop'].get('right', x1), x0), width)\n y1 = min(max(pos['crop'].get('bottom', y1), y0), height)\n bbox['crop'] = {'left': x0, 'top': y0, 'right': x1, 'bottom': y1}\n corners = np.array([[x0, y0, 1], [x1, y0, 1], [x0, y1, 1], [x1, y1, 1]])\n m = np.identity(3)\n m[0][0] = pos.get('s11', 1) * pos.get('scale', 1)\n m[0][1] = pos.get('s12', 0) * pos.get('scale', 1)\n m[0][2] = pos.get('x', 0)\n m[1][0] = pos.get('s21', 0) * pos.get('scale', 1)\n m[1][1] = pos.get('s22', 1) * pos.get('scale', 1)\n m[1][2] = pos.get('y', 0)\n if not np.array_equal(m, np.identity(3)):\n bbox['transform'] = m\n try:\n bbox['inverse'] = np.linalg.inv(m)\n except np.linalg.LinAlgError:\n msg = 'The position for a source is not invertable (%r)'\n raise TileSourceError(msg, pos)\n transcorners = np.dot(m, corners.T)\n bbox['left'] = min(transcorners[0])\n bbox['top'] = min(transcorners[1])\n bbox['right'] = max(transcorners[0])\n bbox['bottom'] = max(transcorners[1])\n return bbox",
"def getSourceSubset(self, selection=None):\n if not selection or selection.lower() == \"all\":\n return self.sources\n # sort by brightness\n from past.builtins import cmp\n from functools import cmp_to_key\n srclist0 = sorted(self.sources, key=cmp_to_key(lambda a, b: cmp(b.brightness(), a.brightness())))\n all = set([src.name for src in srclist0])\n srcs = set()\n for ispec, spec in enumerate(re.split(\"\\s+|,\", selection)):\n spec = spec.strip()\n if spec:\n # if first spec is a negation, then implictly select all sources first\n if not ispec and spec[0] in \"!-\":\n srcs = all\n if spec.lower() == \"all\":\n srcs = all\n elif self._re_bynumber.match(spec):\n negate, start, end = self._re_bynumber.match(spec).groups()\n sl = slice(int(start) if start else None, int(end) if end else None)\n if negate:\n srcs.difference_update([src.name for src in srclist0[sl]])\n else:\n srcs.update([src.name for src in srclist0[sl]])\n elif spec.startswith(\"-=\") or spec.startswith(\"!=\"):\n srcs.difference_update([src.name for src in srclist0 if getattr(src, spec[2:], None)])\n elif spec.startswith(\"=\"):\n srcs.update([src.name for src in srclist0 if getattr(src, spec[1:], None)])\n elif spec.startswith(\"-\") or spec.startswith(\"!\"):\n srcs.discard(spec[1:])\n else:\n srcs.add(spec)\n # make list\n return [src for src in srclist0 if src.name in srcs]",
"def detect_sources(snr, minsnr=4, minsize=8, maxsize=32, minsep=0,\n min_snr_ratio=0.1, maxsrc=20, measure=None):\n if minsize > maxsize:\n raise ValueError('Expected minsize <= maxsize.')\n ny, nx = snr.shape\n # Label all non-overlapping regions above SNRmin in the inset image.\n labeled, nlabels = scipy.ndimage.label(snr > minsnr)\n if nlabels == 0:\n return []\n labels = np.arange(1, nlabels + 1)\n # Calculate bounding boxes for each candidate source.\n bboxes = scipy.ndimage.find_objects(labeled)\n # Estimate the quadrature summed SNR for each candidate source.\n snrtot = scipy.ndimage.labeled_comprehension(\n snr, labeled, labels, out_dtype=float, default=-1,\n func=lambda X: np.sqrt(np.sum(X ** 2)))\n maxsnrtot = None\n # Rank sources by snrtot.\n ranks = np.argsort(snrtot)[::-1]\n # Build the final list of detected sources.\n sources = []\n snrsq = snr ** 2\n minsepsq = minsep ** 2\n centroids = np.empty((maxsrc, 2))\n for idx in range(nlabels):\n label = labels[ranks[idx]]\n srcsnrtot = snrtot[label - 1]\n if maxsnrtot is not None and srcsnrtot < min_snr_ratio * maxsnrtot:\n break\n # Lookup this source's bounding box.\n yslice, xslice = bboxes[label - 1]\n size = max(yslice.stop - yslice.start, xslice.stop - xslice.start)\n if size < minsize or size > maxsize:\n continue\n # Calculate the SNR**2 weighted center of mass for this source.\n yc, xc = scipy.ndimage.center_of_mass(snrsq, labeled, label)\n nsrc = len(sources)\n if nsrc > 0 and minsep > 0:\n # Calculate the distance to each previous source.\n rsq = np.sum((centroids[:nsrc] - np.array([xc, yc])) ** 2, axis=1)\n if np.any(rsq < minsepsq):\n continue\n params = (srcsnrtot, xc, yc, yslice, xslice)\n if measure is not None:\n params = measure(*params)\n if params is None:\n continue\n centroids[nsrc] = (xc, yc)\n if maxsnrtot is None:\n maxsnrtot = srcsnrtot\n sources.append(params)\n if len(sources) == maxsrc:\n break\n return sources",
"def get_source_patch_masks(self):\n self.source_patch_masks = {\n patch_center: self.get_patch_mask(patch_center)\n for patch_center in self.patch_centers\n if not np.bitwise_and(self.get_patch_mask(patch_center), self.unknown_mask).any()\n }\n self.patch_centers = tuple(list(self.source_patch_masks.keys()))",
"def get_background_extracted_images(img):\n kernel = np.ones((5, 5), np.float32) / 25\n img = cv2.filter2D(img, -1, kernel)\n\n background_color = br.get_primary_background_color(img)\n spot_size = 200\n background_location = br.get_background_spot(img, background_color, spot_size)\n binary_threshold = 25\n binary_img = br.generate_binary_background_image(img, background_color, binary_threshold)\n binary_background_img = br.separate_background(binary_img, background_location)\n cropped_images = br.crop_image_rectangles(img, binary_background_img)\n feature_threshold = 10\n valid_cropped_images = br.validate_cropped_images(cropped_images, feature_threshold)\n return valid_cropped_images",
"def test_filter_bg(images):\n print('STARTING BACKGROUND FILTERING TEST')\n\n for i, image in enumerate(images):\n bg_filtered_image = filter_bg(image)\n\n contours, _ = cv2.findContours(bg_filtered_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n bg_filtered_image = cv2.drawContours(cv2.cvtColor(bg_filtered_image, cv2.COLOR_GRAY2BGR), contours, 0, [255,0,0], 1)\n\n original_and_filtered = hstack_images(image, bg_filtered_image, False)\n cv2.imshow('original & background filtered image ({})'.format(i), original_and_filtered)\n cv2.waitKey(0)\n\n cv2.destroyAllWindows()\n print('FINISHED BACKGROUND FILTERING TEST\\n')",
"def gen(self):\n for path, bg_idx, bbox in zip(self.img_paths, self.bgs, self.bbox):\n img = cv2.imread(self.background[bg_idx])\n for alpha, obj, box in zip(self.alphas, self.objects, bbox):\n img, mask = self.alpha_blend(img, obj, box, alpha)\n yield path, img, mask",
"def add_sources(self, model, params, sample_box=100):\n\n if model == 'gaussian':\n model = astropy.modeling.functional_models.Gaussian2D\n\n # We use a small grid to make the computing of the source faster.\n if sample_box is None:\n sample_box_x, sample_box_y = self.ccd.shape\n else:\n sample_box_x = sample_box_y = sample_box\n\n y, x = numpy.mgrid[0:sample_box_y, 0:sample_box_x]\n\n if isinstance(params, numpy.ndarray):\n assert params.ndim == 2, 'invalid number of dimensions in params'\n params = params.tolist()\n\n for nn in range(len(params)):\n model_n = model(*params[nn])\n self.add_source(model_n, x, y)",
"def _add_source_net_filter(self, rule_list, source_net):\n for rule in rule_list:\n if (\"source\" in rule.keys()):\n if (\"nets\" in rule[\"source\"].keys()):\n rule[\"source\"][\"nets\"].append(source_net)\n else:\n rule[\"source\"].update({\"nets\": [source_net]})\n else:\n rule.update({\"source\": {\"nets\": [source_net]}})",
"def findsources(self, *args, **kwargs):\n return _image.image_findsources(self, *args, **kwargs)",
"def associate(conn, detected_sources, imobj, search_radius, save):\n # Find image resolution class\n for config, res_range in res_dict.items():\n if res_range[0] < imobj.bmin <= res_range[1]:\n res_class = config\n \n # Extract all previously detected sources in the same FOV\n assoc_rows = cone_search(conn, 'assoc_source', imobj.obs_ra,\n imobj.obs_dec, search_radius)\n match_logger.info('Extracted {} sources from assoc_source table '\n 'within {} degrees.'.format(\n len(assoc_rows), search_radius))\n # Limit to sources taken from images of similar resolution\n if len(assoc_rows) > 0:\n filtered_assoc_rows = filter_res(assoc_rows, res_class)\n else:\n filtered_assoc_rows = []\n\n if not filtered_assoc_rows:\n # No previous sources found in that sky region at that resolution\n for src in detected_sources:\n src.res_class = res_class\n src.ndetect = 1\n detected_matched = []\n detected_unmatched = detected_sources\n assoc_matched = []\n assoc_unmatched = []\n else:\n # Translate row dictionaries to DetectedSource objects\n assoc_sources = []\n assoc_ids = []\n for asrc in filtered_assoc_rows:\n assoc_ids.append(asrc['id'])\n assoc_sources.append(dbclasses.DetectedSource())\n dbclasses.dict2attr(assoc_sources[-1], asrc)\n match_logger.info('Attempting to match {} sources from this image to '\n '{} sources previously detected in VLITE images...'.\n format(len(detected_sources), len(assoc_sources)))\n\n detected_matched = []\n detected_unmatched = []\n assoc_matched = []\n assoc_unmatched = []\n\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n # Print results without saving to database\n if not save:\n # Dump detected_sources into temporary table\n sql = (\n '''\n CREATE TEMP TABLE temp_source (\n src_id INTEGER,\n ra DOUBLE PRECISION,\n dec DOUBLE PRECISION\n );\n ''')\n cur.execute(sql)\n conn.commit()\n for src in detected_sources:\n cur.execute('''INSERT INTO temp_source (\n src_id, ra, dec) VALUES (%s, %s, %s)''', (\n src.src_id, src.ra, src.dec))\n conn.commit()\n # Find nearest neighbor & \"match\" if within half a beam\n sql = '''SELECT a.src_id, bb.id AS assoc_id,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) < %s AS match\n FROM temp_source AS a, LATERAL (\n SELECT b.* FROM assoc_source AS b WHERE b.id IN %s\n ORDER BY q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1)\n AS bb'''\n values = (0.5*imobj.bmin, tuple(assoc_ids))\n cur.execute(sql, values)\n rows = cur.fetchall()\n cur.execute('DROP TABLE temp_source')\n conn.commit()\n match_logger.info('-----------------------------------------------'\n '-----------------------------------------------'\n '---------------------------------')\n match_logger.info('src_id match assoc_id\\tra\\t\\te_ra\\t\\t\\tdec\\t\\t'\n 'e_dec\\t\\tseparation (arcsec)\\tndetect')\n match_logger.info('-----------------------------------------------'\n '-----------------------------------------------'\n '---------------------------------')\n # Save association results for database\n else:\n # Find nearest neighbor & \"match\" if within half a beam\n sql = '''SELECT a.src_id, bb.id AS assoc_id,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) AS sep,\n 3600*q3c_dist(a.ra, a.dec, bb.ra, bb.dec) < %s AS match\n FROM detected_source AS a, LATERAL (\n SELECT b.* FROM assoc_source AS b\n WHERE a.image_id = %s AND b.id IN %s ORDER BY\n q3c_dist(a.ra, a.dec, b.ra, b.dec) ASC LIMIT 1) AS bb'''\n values = (0.5*imobj.bmin, imobj.id, tuple(assoc_ids))\n cur.execute(sql, values)\n rows = cur.fetchall()\n\n 
cur.close()\n\n # Create dictionary of src_id keys & associated values\n rowdict = {}\n for row in rows:\n rowdict[row['src_id']] = [row['assoc_id'], row['sep'], row['match']]\n\n for src in detected_sources:\n # Get the associated source object\n asrc = [msrc for msrc in assoc_sources if \\\n msrc.id == rowdict[src.src_id][0]][0]\n if rowdict[src.src_id][2]:\n # It's a match!\n src.assoc_id = asrc.id\n detected_matched.append(src)\n # Compute weighted averages\n cur_sigra_sq = asrc.e_ra * asrc.e_ra\n cur_sigdec_sq = asrc.e_dec * asrc.e_dec\n asrc.e_ra = np.sqrt(1. / (\n (1. / cur_sigra_sq) + (1. / (src.e_ra * src.e_ra))))\n asrc.ra = (asrc.e_ra * asrc.e_ra) * (\n (asrc.ra / cur_sigra_sq) + (src.ra / (\n src.e_ra * src.e_ra)))\n asrc.e_dec = np.sqrt(1. / (\n (1. / cur_sigdec_sq) + (1. / (src.e_dec * src.e_dec))))\n asrc.dec = (asrc.e_dec * asrc.e_dec) * (\n (asrc.dec / cur_sigdec_sq) + (src.dec / (\n src.e_dec * src.e_dec)))\n asrc.ndetect += 1\n assoc_matched.append(asrc)\n else:\n # No match -- new source\n src.res_class = res_class\n src.ndetect = 1\n detected_unmatched.append(src)\n assoc_unmatched.append(asrc)\n if not save:\n match_logger.info('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(\n src.src_id, rowdict[src.src_id][2], asrc.id, asrc.ra,\n asrc.e_ra, asrc.dec, asrc.e_dec, rowdict[src.src_id][1],\n asrc.ndetect))\n\n match_logger.info(' -- number of matches: {}'.format(len(detected_matched)))\n match_logger.info(' -- number of new sources to add: {}'.format(\n len(detected_unmatched)))\n\n return detected_matched, detected_unmatched, assoc_matched, assoc_unmatched",
"def image_sources(dim, xs, order, rc):\n sources = np.zeros((number_of_sources(order)+1, 6))\n \"\"\"gain factor of sound source = 1\n number of the last hitted wall = 0\n propagation path = 0, because 0 wall hitted\"\"\"\n sources[0, :] = [xs[0], xs[1], xs[2], 1, 0, 0]\n\n c = 0 # counter to iterate\n r = 1 # variable to write data in the corresponding row\n while c <= number_of_sources(order - 1):\n sq = mirror_source(dim, [sources[c, 0], sources[c, 1],\n sources[c, 2]], sources[c, 3], sources[c, 4], rc,\n sources[c, 5])\n sources[r:r+sq.shape[0], :] = sq\n c += 1\n r += sq.shape[0]\n return(sources)",
"def select_sources(cat_table, cuts):\n nsrc = len(cat_table)\n full_mask = np.ones((nsrc), bool)\n for cut in cuts:\n if cut == 'mask_extended':\n full_mask *= mask_extended(cat_table)\n elif cut == 'select_extended':\n full_mask *= select_extended(cat_table)\n else:\n full_mask *= make_mask(cat_table, cut)\n\n lout = [src_name.strip() for src_name in cat_table['Source_Name'][full_mask]]\n return lout",
"def get_batch(self, src, geometries):\n\n batch = []\n for bounds in geometries.bounds.itertuples():\n bot, left = src.index(bounds[1], bounds[2])\n top, right = src.index(bounds[3], bounds[4])\n window = rasterio.windows.Window(left, top, right-left, bot-top)\n batch.append(src.read(indexes=self.indexes, window=window))\n if self.interleave == 'pixel' and len(batch[-1].shape) == 3:\n batch[-1] = np.moveaxis(batch[-1], 0, -1)\n for func,args,kwargs in self.preprocess.values():\n batch[-1] = func(batch[-1], *args, **kwargs)\n\n return np.stack(batch)",
"def getBgImgs(bgSource, startPic=1, picsPerRep=2, rmHighCounts=True, bgWeights=[], weightBackgrounds=True):\n if type(bgSource) == int:\n with exp.ExpFile(bgSource) as file:\n allpics = file.get_pics() # probably fails\n if type(bgSource) == type(np.array([])) or type(bgSource) == type([]):\n allpics = bgSource\n #bgPics = allpics[startPic::picsPerRep]\n bgPics = [setPics[startPic::picsPerRep] for setPics in allpics]\n if rmHighCounts:\n bgPics = [rmHighCountPics(setPics, 7000) for setPics in bgPics]\n if weightBackgrounds:\n assert(len(bgPics) == len(bgWeights))\n avgPcBg = avgBg = np.zeros(np.array(bgPics[0][0]).shape)\n for weight, setPics in zip(bgWeights, bgPics):\n avgBg += weight/sum(bgWeights) * np.mean(setPics,0)\n avgPcBg += weight/sum(bgWeights)*photonCounting(setPics, 120)[0] / len(setPics)\n else:\n avgBg = np.mean(bgPics,0)\n avgPcBg = photonCounting(bgPics, 120)[0] / len(bgPics)\n return avgBg, avgPcBg",
"def make_source(self):\n sources = []\n for feature in self.regions_json['features']:\n sources.append(dict(type= 'FeatureCollection', features = [feature]))\n return sources",
"def _sample_frcnn_minibatch_per_image(model,\n proposal_boxlist,\n gt_boxlist):\n (loc_targets, loc_weights, cls_targets, cls_weights, msk_targets, _\n ) = model._frcnn_target_assigner.assign(proposal_boxlist, gt_boxlist)\n\n # `cls_weights` is set to ones of shape [max_num_proposals] if all proposals\n # have classification weight being 0.\n cls_weights += tf.to_float(tf.equal(tf.reduce_sum(cls_weights), 0))\n positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)\n\n # [max_num_proposals], indicator matrix sum to val <= `frcnn_minibatch_size`\n sampled_indicator = model._frcnn_minibatch_sampler_fn(\n tf.cast(cls_weights, tf.bool),\n model._frcnn_minibatch_size,\n positive_indicator)\n\n sampled_indices = tf.reshape(tf.where(sampled_indicator), [-1])\n\n proposal_boxlist.set_field('cls_targets', cls_targets)\n proposal_boxlist.set_field('cls_weights', cls_weights)\n proposal_boxlist.set_field('loc_targets', loc_targets)\n proposal_boxlist.set_field('loc_weights', loc_weights)\n if msk_targets is not None:\n proposal_boxlist.set_field('msk_targets', msk_targets)\n\n return box_list_ops.gather(proposal_boxlist, sampled_indices)",
"def sources_range(start=3, end=5, frq=151):\n assert start < end, \"Requested range must be of positive width\"\n valid_sources = []\n for gleam_obj in catalog.obj_catalog:\n if gleam_obj.flux_by_frq[frq] <= end and \\\n gleam_obj.flux_by_frq[frq] >= start:\n valid_sources.append(gleam_obj)\n print(\"Number of valid sources encountered:\", len(valid_sources))\n return valid_sources",
"def subtract_2d_background(source, background):\n def _subtract_2d_background(model, background):\n result = model.copy()\n # Handle individual NIRSpec FS, NIRSpec MOS\n if isinstance(model, datamodels.MultiSlitModel):\n for slit, slitbg in zip(result.slits, background.slits):\n slit.data -= slitbg.data\n\n # Handle MIRI LRS, MIRI MRS and NIRSpec IFU\n elif isinstance(model, (datamodels.ImageModel, datamodels.IFUImageModel)):\n result.data -= background.data\n\n else:\n # Shouldn't get here.\n raise RuntimeError(\"Input type {} is not supported.\"\n .format(type(model)))\n return result\n\n # Handle containers of many datamodels\n if isinstance(source, datamodels.ModelContainer):\n result = datamodels.ModelContainer()\n result.update(source)\n for model, bg in zip(source, background):\n result.append(_subtract_2d_background(model, bg))\n\n # Handle single datamodels\n elif isinstance(source, (datamodels.ImageModel, datamodels.IFUImageModel, datamodels.MultiSlitModel)):\n result = _subtract_2d_background(source, background)\n\n else:\n # Shouldn't get here.\n raise RuntimeError(\"Input type {} is not supported.\"\n .format(type(source)))\n\n return result",
"def prepare_stars_that_need_bg_ols():\n\n wanted = Table.read(\n '/priv/mulga1/marusa/chronostar_projects/solar_neighbourhood/data/ScoCen_box_result_15M_ready_for_bg_ols.fits')\n\n old = Table.read('../scocen/data/data_table_cartesian_including_tims_stars_with_bg_ols_and_component_overlaps.fits')\n old_scocen = Table.read(\n '/priv/mulga1/marusa/chronostar_projects/scocen/data/scocen_candidates_300k_only_spatial_cut.fits')\n\n old_solar_neighbourhood_bg_ols = np.loadtxt('bgols_multiprocessing_0.dat')\n wanted0 = wanted[:len(old_solar_neighbourhood_bg_ols)]\n\n # DELETE THIS!!!\n wanted = wanted[len(old_solar_neighbourhood_bg_ols):]\n\n mask = np.in1d(wanted['source_id'], old['source_id'])\n mask = np.logical_or(mask, np.in1d(wanted['source_id'], old_scocen['source_id']))\n # ~ mask = np.logical_or(mask, np.in1d(wanted['source_id'], old_solar_neighbourhood['source_id']))\n\n # Looking for stars that do NOT have bg ols yet\n mask = ~mask\n\n todo = wanted[mask]\n print\n len(todo)\n print\n len(old), len(wanted), len(wanted) - len(old)\n\n todo.write('solar_neighbourhood_determine_bg_ols_for_these_stars.fits', format='fits')",
"def source_list(self):\n return self._source_list",
"def source_list(self):\n return self._source_list",
"def get_neighbourhood(self, source: int) -> Iterable[GraphEdge]:\n return filter(\n lambda e: e.fst == source,\n self.__edges\n )",
"def early_gen_test_clip(self, list_files, clip_id, stride=1):\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).astype('float'),\n np.zeros((1, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(list_files[start:start + self.time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )\n features[1][0][index] = frame_feature.reshape(1024, )\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels",
"def background_detection(dataset, radius, normal_threshold):\n length = len(dataset)\n normal_list = []\n tree = scipy.spatial.cKDTree(dataset)\n for x, y, z in zip(original_x_int_array, original_y_int_array, original_z_int_array):\n indices = tree.query_ball_point([x, y, z], radius)\n if len(indices) <= 3:\n normal_list.append(0)\n continue\n idx = tuple(indices)\n data = np.vstack([dataset[idx, 0], dataset[idx, 1], dataset[idx, 2]])\n cov = np.cov(data)\n evals, evects = la.eigh(cov)\n evals = np.abs(evals)\n index = evals.argsort()[::-1]\n evects = evects[:, index]\n normal = evects[2][2]\n normal_list.append(normal)\n\n # codes below were region growing algorithm implemented based pseudocode in\n # http://pointclouds.org/documentation/tutorials/region_growing_segmentation.php#region-growing-segmentation\n # awailable voxel list index\n seeds = np.logical_or(np.array(normal_list) > normal_threshold, np.array(normal_list) < -normal_threshold)\n\n seeds = list(np.where(seeds)[0])\n\n voxel_set = np.vstack([original_x_int_array[seeds], original_y_int_array[seeds],\n original_z_int_array[seeds]]).transpose()\n regions = region_growing(voxel_set, GROUND_NEIGHBOR / VOXEL_SIZE)\n\n seeds = np.array(seeds)\n ground = []\n max_len = 0\n if len(regions) == 1:\n ground += seeds[regions[0]]\n else:\n for region in regions:\n if len(region) > max_len:\n max_len = len(region)\n ground = []\n ground = seeds[region]\n # if float(len(region)) / length < 0.2:\n # continue\n # else:\n # ground += list(seeds[region])\n # if len(ground) == 0:\n # ground.append(temp_ground)\n return normal_list, ground",
"def test_sources():\n g = Graph(from_list=[\n (1, 3, 1),\n (2, 4, 1),\n (2, 5, 1),\n (3, 5, 1),\n (4, 6, 1),\n (5, 6, 1),\n ])\n g.add_node(7)\n s = g.sources(5)\n e = {1, 2, 3}\n assert s == e\n\n s2 = g.sources(1)\n e2 = set()\n assert s2 == e2, s2\n\n s3 = g.sources(6)\n e3 = {1, 2, 3, 4, 5}\n assert s3 == e3\n\n s4 = g.sources(7)\n e4 = set()\n assert s4 == e4",
"def create_in_range_lst(source_features: list, user_zip_code: str, radius: int,\n active_set: set, second_dose: bool,\n provider_filter: list) -> list:\n in_range_locs = []\n user_coords = ZIP_MAP_DICT[user_zip_code][0]\n for loc in source_features:\n if loc['properties']['appointments_available'] and\\\n loc['properties']['provider_brand'].lower() in active_set and\\\n (loc['properties']['appointments_available_all_doses'] is True or\n loc['properties']['appointments_available_2nd_dose_only'] is\n second_dose) and \\\n (len(provider_filter) == 0 or\n loc['properties']['provider_brand_name'] in provider_filter):\n # format [latitude, longitude]\n loc_coord = loc['geometry']['coordinates'][::-1]\n if loc_coord != [None, None]:\n if earth_distance(loc_coord[0], loc_coord[1],\n user_coords[0], user_coords[1]) <= radius:\n in_range_locs.append(loc)\n return in_range_locs",
"def fill_srclist( self, ra=None, dec=None, max_roi=20, free_roi=5, tsmin=0, **kwargs ):\n\n self.__dict__.update(kwargs)\n self.srclist = []\n\n impl = minidom.getDOMImplementation()\n xmldoc_out = impl.createDocument(None, \"source_library\", None)\n xmldoc_out.documentElement.setAttribute('title', \"source library\")\n\n # ================================================================ \n # ====================== DIFFUSE BACKGROUND ======================\n # ================================================================\n # GALACTIC\n if self.galactic is not None:\n source_out = xmldoc_out.createElement('source')\n source_out.setAttribute('name', self.galactic_name)\n source_out.setAttribute('type', \"DiffuseSource\")\n \n spectrum_out = xmldoc_out.createElement('spectrum')\n spectrum_out.setAttribute('type', \"PowerLaw\")\n spectrum_out.appendChild(parameter_element(\"1\", \"Prefactor\", \"100.0\", \"1e-5\", \"1\", \"1.\"))\n spectrum_out.appendChild(parameter_element(\"1\", \"Index\", \"1\", \"-1\", \"1\", \"0.\"))\n spectrum_out.appendChild(parameter_element(\"0\", \"Scale\", \"2000\", \"50\", \"1\", \"500\"))\n source_out.appendChild(spectrum_out)\n \n spatial_out = xmldoc_out.createElement('spatialModel')\n spatial_out.setAttribute('type', \"MapCubeFunction\")\n spatial_out.setAttribute('file', self.galactic)\n spatial_out.appendChild(parameter_element(\"0\", \"Normalization\", \"1000.0\", \"0.001\", \"1\", \"1\"))\n source_out.appendChild(spatial_out)\n \n xmldoc_out.documentElement.appendChild(source_out)\n\n # ISOTROPIC\n if self.isotropic is not None:\n source_out = xmldoc_out.createElement('source')\n source_out.setAttribute('name', self.isotropic_name)\n source_out.setAttribute('type', \"DiffuseSource\")\n \n spectrum_out = xmldoc_out.createElement('spectrum')\n spectrum_out.setAttribute('type', \"FileFunction\")\n spectrum_out.setAttribute('file', self.isotropic)\n spectrum_out.appendChild(parameter_element(\"1\", \"Normalization\", \"1000.0\", \"1e-05\", \"1.0\", \"1.0\"))\n source_out.appendChild(spectrum_out)\n \n spatial_out = xmldoc_out.createElement('spatialModel')\n spatial_out.setAttribute('type', \"ConstantValue\")\n spatial_out.appendChild(parameter_element(\"0\", \"Value\", \"10.0\", \"0.0\", \"1.0\", \"1.0\"))\n source_out.appendChild(spatial_out)\n \n xmldoc_out.documentElement.appendChild(source_out)\n\n # Limb smooth\n '''\n if self.limb is not None:\n source_out = xmldoc_out.createElement('source')\n source_out.setAttribute('name', \"LIMB\")\n source_out.setAttribute('type', \"DiffuseSource\")\n \n spectrum_out = xmldoc_out.createElement('spectrum')\n spectrum_out.setAttribute('type', \"ConstantValue\")\n # spectrum_out.setAttribute('file', limb_spec_filename)\n spectrum_out.appendChild(parameter_element(\"0\", \"Value\", \"10\", \"0.1\", \"1.0\", \"1\"))\n source_out.appendChild(spectrum_out)\n \n spatial_out = xmldoc_out.createElement('spatialModel')\n spatial_out.setAttribute('type', \"MapCubeFunction\")\n spatial_out.setAttribute('file', limb_filename)\n spatial_out.appendChild(parameter_element(\"0\", \"Normalization\", \"1e3\", \"1e-3\", \"1\", \"1\"))\n source_out.appendChild(spatial_out)\n \n xmldoc_out.documentElement.appendChild(source_out) \n '''\n\n # ====================== SOURCE LIST ======================\n # add the nearby sources from the LAT xml catalog\n # ========================================================= \n nbsrc = 0\n\n for source in self.sourcelist:\n\n phflux, err_phflux = 0, 0\n eflux, err_eflux, ts_value = 0, 0, 0\n ra_xml, 
dec_xml = 0, 0\n \n srcname = source.getAttribute('name')\n if source.getAttribute('type'): type = source.getAttribute('type')\n if source.getAttribute('TS_value'): ts_value = float(source.getAttribute('TS_value'))\n if source.getAttribute('Energy_Flux100'): eflux = source.getAttribute('Energy_Flux100')\n if source.getAttribute('Unc_Energy_Flux100'): err_eflux = source.getAttribute('Unc_Energy_Flux100')\n if source.getAttribute('Flux100'): phflux = source.getAttribute('Flux100')\n if source.getAttribute('Unc_Flux100'): err_phflux = source.getAttribute('Unc_Flux100')\n if source.getAttribute('RA'): ra_xml = float(source.getAttribute('RA'))\n if source.getAttribute('DEC'): dec_xml = float(source.getAttribute('DEC'))\n \n spectrumList = source.getElementsByTagName('spectrum')\n specparamlist = spectrumList[0].getElementsByTagName('parameter')\n \n spatialList = source.getElementsByTagName('spatialModel')\n spatialParamList = spatialList[0].getElementsByTagName('parameter')\n \n for spparam in spatialParamList:\n spparam_name = str(spparam.getAttribute('name'))\n spparam_value = float(spparam.getAttribute('value'))\n if spparam_name == 'RA': ra_xml = spparam_value\n if spparam_name == 'DEC': dec_xml = spparam_value\n \n if type == 'DiffuseSource':\n template = spatialList[0].getAttribute('file')\n template_filename = os.path.join(self.template_dir,template)\n if os.path.isfile(template_filename):\n spatialList[0].setAttribute('file',template_filename)\n for src in self.DiffuseSourceList:\n if os.path.basename(template) == src['name']:\n ra_xml, dec_xml = src['ra'], src['dec']\n else: ra_xml, dec_xml = -1, -1\n if srcname == self.galactic_name: ra_xml, dec_xml = 0, 0\n if srcname == self.isotropic_name: ra_xml, dec_xml = 0, 0\n \n # =====================================\n # check if the source is within MAX_ROI\n # =====================================\n angsep = sepangle_deg(ra,dec,ra_xml,dec_xml)\n if (angsep < max_roi or (ra_xml==0 and dec_xml==0)) and ts_value >= tsmin:\n\n self.srclist += [{'name':srcname,'ra':ra_xml,'dec':dec_xml,'angsep':angsep,'ts':ts_value,\n 'type':type,'phflux':float(phflux),'err_phflux':float(err_phflux),\n 'eflux':float(eflux),'err_eflux':float(err_eflux)}]\n\n for specparam in specparamlist:\n specparam_name = specparam.getAttribute('name')\n\n if specparam.getAttribute('error'): specparam.removeAttribute(\"error\")\n \n # if the source is out of ROI_INT, the param are not free = 0\n if angsep > free_roi: specparam.setAttribute('free', \"0\")\n\n # if the source is within FREE_ROI all the param are free = 1 \n else: \n mask = ( specparam_name == 'Integral' or specparam_name == 'Prefactor' or specparam_name == 'Index' or\n specparam_name == 'norm' or specparam_name == 'alpha' or specparam_name == 'Index1' or\n specparam_name == 'Cutoff' ) \n if mask: specparam.setAttribute('free', \"1\")\n\n # exception: fit looks more stable\n if specparam_name == 'beta': specparam.setAttribute('free', \"0\")\n\n xmldoc_out.documentElement.appendChild(source)\n\n # sort srclist by angsep\n self.srclist.sort()\n\n # save\n self.xmldoc_out = xmldoc_out"
]
| [
"0.63399184",
"0.5802796",
"0.54545987",
"0.541729",
"0.5401223",
"0.5358",
"0.5306327",
"0.5305919",
"0.52601194",
"0.5211193",
"0.52072334",
"0.5139423",
"0.51375467",
"0.51334727",
"0.51139283",
"0.50835663",
"0.505516",
"0.5038187",
"0.50375575",
"0.50116676",
"0.5009155",
"0.49995032",
"0.4991251",
"0.4991251",
"0.49871072",
"0.49719286",
"0.49471626",
"0.4932249",
"0.49312934",
"0.4922473"
]
| 0.75503904 | 0 |
Check whether an email address is watching an object. | def check_watch(kls, id, email, event_type=None, locale=''):
ct = ContentType.objects.get_for_model(kls)
kwargs = {'content_type': ct, 'watch_id': id, 'email': email,
'locale': locale}
if event_type:
kwargs['event_type'] = event_type
return EventWatch.uncached.filter(**kwargs).exists() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isEmailUsed(self, email):\n\n\t\ttestq = {\"email\": email};\n\t\ttest_result = self.db.request(\"getOne\", testq);\n\n\t\tif test_result:\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;",
"def watching(self):\n return self.get() in self._refs",
"def has_object_permission(self, request, view, obj):\n if request.method == \"PUT\" or request.method == \"PATCH\":\n return request.user.id == obj.organiser_id\n\n return obj.participant_set.filter(user_id=request.user.id, meeting_id=obj.id).count() == 1",
"def checkMailAddress(obj, someAddr):\n # #5353 use checkEmailAddress from CMFDefault utils instead of\n # validateSingleEmailAddress from plone_utils as the plone email validator \n # is better at detecting invalid addreses\n try:\n checkEmailAddress(someAddr)\n except EmailAddressInvalid:\n return False\n return True",
"def get_is_responded(self, obj: Vacancy) -> bool:\n request = self.context.get(\"request\")\n if not request or not request.user or request.user.is_anonymous:\n return False\n return obj.responded_users.filter(pk=request.user.pk).exists()",
"def has_object_permission(self, request, view, obj):\n\n return request.user == obj",
"def has_object_permission(self, request, view, obj):\n return request.user == obj",
"def notifyer(notifyer):\n\n if '@' in notifyer:\n return True\n else:\n return False",
"def is_live(self, obj):\n most_appropriate_object = get_appropriate_object_from_model(self.model)\n if most_appropriate_object == obj:\n return True\n return False",
"def _email_allowed(self, tool):\n if 'emails' not in self.watchdb[tool]:\n self.watchdb[tool]['emails'] = []\n\n sent = self.watchdb[tool]['emails']\n now = time.time()\n limit_minute = now - 300\n if sum(e > limit_minute for e in sent) >= 1:\n return False\n\n limit_max = now - 3600\n if sum(e > limit_max for e in sent) >= 5:\n return False\n\n self.watchdb[tool]['emails'] = [e for e in sent if e > limit_max]\n self.watchdb[tool]['emails'].append(now)\n return True",
"def has_object_permission(self, request, view, obj):\n\n try:\n Contact.objects.get(user=request.user)\n\n except Contact.DoesNotExist:\n return False\n\n return True",
"def has_object_permission(self, request, view, obj):\n return request.user.id == obj.user_id",
"def can_be_registered(self, event_type):\n return (event_type in self._watchable_events or\n (event_type == self.ANY and self._allow_any))",
"def user_is_attendee(user):\n exists = check_attendee_exists(user, user)\n if exists[0]:\n return True\n return False",
"def has_object_permission(self, request, view, obj):\n if request.user.is_manager or request.user == obj.family or obj.family is None:\n return True\n return False",
"def has_object_permission(self, request, view, obj):\n if request.user.is_manager or request.user == obj.registration.child.family:\n return True\n return False",
"def get_is_subscribed(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return None\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n return profile in obj.subscribed_users.all()",
"def checkIsEmailAvailable(self, email):\n\n return User.objects.filter(email=email).exists()",
"def is_replied_to(thread):\r\n messages = thread['messages']\r\n if len(messages) < 2:\r\n return False\r\n user_email = get_sender_email(messages[0])\r\n for i in range(1, len(messages)):\r\n sender_email = get_sender_email(messages[i])\r\n if user_email != sender_email:\r\n return True\r\n return False",
"def __contains__(self, obj):\n return obj in self.actors",
"def has_object_permission(self, request, view, obj):\n\n return obj.active",
"def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n # When the user make a request It will check that is on Safe methods, so it return true if the user is \n # trying to update is own profile or return false. And also it will return the obj.id == request.user.id\n return obj.id == request.user.id",
"def has_object_permission(self, request, view, obj):\n usuario_request = request.user\n usuario_a_modificar = obj\n\n return usuario_request != usuario_a_modificar",
"def has_object_permission(self, request, view, obj):\n if request.user == obj.family or obj.family is None:\n return True\n return False",
"def has_object_permission(self, request, view, obj):\n owner_field = getattr(view, \"owner_field\", None)\n\n if owner_field is None:\n # if no owner_field is specified, the object itself is compared\n owner = obj\n else:\n # otherwise we lookup the owner by the specified field\n owner = getattr(obj, owner_field)\n\n return owner == request.user",
"def get_is_interested(self, obj):\n # pylint: disable=no-member\n user = self.context['request'].user\n if not user.is_authenticated:\n return None\n profile = UserProfile.objects.get(user=user)\n return profile in obj.interested_users.all()",
"def device_matches_object(self, obj=None):\n\n\t\treturn self.device_is_configured and self.config_match(obj=obj)",
"def exists(self, obj):\n return False",
"def has_object_permission(self, request, view, obj):\n # if the user is trying to retrieve to create a item.. it will return true\n if request.method in permissions.SAFE_METHODS:\n return True\n # check if the user is trying to don't do a SAFE_METHODS, put,patch,delete and if the feed owner is doing it or another different user.. and it will return true if match or false if not\n return obj.user_profile.id == request.user.id",
"def has_object_permission(self, request, view, obj):\n\n #check if method is get i.e user only want to view\n if request.method in permissions.SAFE_METHODS:\n return True\n\n #if method is not get then will check if user wants to edit own profile\n return obj.id == request.user.ids"
]
| [
"0.6107086",
"0.61063015",
"0.6095941",
"0.59761345",
"0.5868632",
"0.58556944",
"0.585292",
"0.5732046",
"0.5728863",
"0.57023937",
"0.5698885",
"0.56671286",
"0.5618365",
"0.56150615",
"0.5568204",
"0.55669874",
"0.55644435",
"0.5563092",
"0.55369306",
"0.55316216",
"0.5521627",
"0.5518524",
"0.5502571",
"0.5499123",
"0.5493498",
"0.54884726",
"0.54731596",
"0.5472103",
"0.54706097",
"0.5457707"
]
| 0.6465527 | 0 |
Destroy a watch on an object. If watch does not exist, return False. | def destroy_watch(kls, id, email, event_type=None, locale=''):
ct = ContentType.objects.get_for_model(kls)
kwargs = {'content_type': ct, 'watch_id': id, 'email': email,
'locale': locale}
if event_type:
kwargs['event_type'] = event_type
w = EventWatch.objects.filter(**kwargs)
count = w.count()
w.delete()
return count > 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_destroy(self) -> bool:\r\n raise NotImplementedError",
"def destroy(self):\n return True",
"def destroyExternal(self, remoteObject):\r\n if remoteObject == self.__obj:\r\n self.destroy()\r\n return True\r\n\r\n return False",
"def cancel_watch():\n global watcher, watching\n if watcher is not None:\n watcher.finish()\n watcher = None\n watching = False",
"def destroyed(self) -> bool:\n return self._ptr is None",
"def delete_object(self, object_id: str) -> bool:\n del self.objects[object_id]",
"def _destroy_resource(resource):\n global _existing\n if _existing[resource]:\n print('{v} a {r} with id: {i}.'.format(\n v='Would destroy' if dry else 'Destroying',\n r=resource,\n i=_existing[resource].id\n ))\n\n if dry:\n return True\n else:\n try:\n # _existing[resource].delete()\n getattr(_existing[resource], definitions[resource].destroy)()\n\n if resource == 'vm':\n # untag resource in case a UP follow very quickly: the instance,\n # although terminating, still exists for a while\n print('Postfixing tag of instance {} with -terminated'.format(_existing[resource].id))\n _tag_resource(_existing[resource], tags={args.tag: args.role + '-terminated'})\n\n _existing[resource] = None\n\n except AttributeError as e:\n\n if resource == 'vm':\n state = _existing[resource].state['Name']\n if state in ['terminated', 'shutting-down']:\n print('Trying to delete a vm {i} wich is {s}. not an issue.'.format(\n i=_existing[resource].id,\n s=state\n ))\n return True\n\n # all other cases are problems\n traceback.print_exc()\n return False\n\n except Exception as e:\n print('Could not destroy resource {r}, id {i}. Reason just below.'.format(\n r=resource,\n i=_existing[resource].id,\n ))\n traceback.print_exc()\n return False\n return True\n else:\n print('Trying to destroy a {r} tagged {k}:{v}, but none found'.format(\n r=resource,\n k=args.tag,\n v=args.role\n ))\n return False",
"def do_destroy(self, arg):\n obj = self.verify(arg, 2)\n if obj:\n del storage.all()[obj]\n storage.save()",
"def destroy(self, proxy):\n\n if isinstance(proxy, Proxy):\n return proxy.destroy()\n else:\n return False",
"def destroy_check(self):\n pass",
"def force_destroy(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"force_destroy\")",
"def force_destroy(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"force_destroy\")",
"def force_destroy(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"force_destroy\")",
"def do_destroy(self, arg):\n args = shlex.split(arg)\n stored_objects = models.storage.all()\n\n if self.basic_errs(args):\n '''check if instance exists'''\n instance = self.check_instance(args[0], args[1], stored_objects)\n if instance:\n \"\"\"delete from FileStorage.__objects\"\"\"\n del stored_objects[instance]\n \"\"\"overwrite the new data to file.json\"\"\"\n models.storage.save()",
"def unwatch(self):\n if self._transaction_state not in (None, \"watch\"):\n raise ValueError(\"UNWATCH inside MULTI is not allowed\")\n self._transaction_state = None\n return self._command(b'UNWATCH', handler=\"OK\")",
"def unwatch(self):\n pass",
"def destroy(self):\n\n node = self.node\n if not config.is_node_destroyable(node.name):\n logger.error('node %s has non-destroyable prefix' % node.name)\n return False\n logger.info('destroying node %s' % node)\n return node.destroy()",
"def ensure_watch(self):\n if self._stopped:\n self._stopped = False\n self.watch = self.zk_client.DataWatch(self.version_node,\n self._update_version_watch)",
"def perform_destroy(self, instance):\n pass",
"def remote_destroy(self):\r\n if not self._terminating:\r\n if self._container:\r\n self._terminating = self._stop()\r\n self._terminating.addBoth(passthrough(self._destroy))\r\n else:\r\n self._terminating = succeed(None)\r\n\r\n return self._terminating",
"def remove_instance_state_on_destroy(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"remove_instance_state_on_destroy\")",
"def remove_instance_state_on_destroy(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"remove_instance_state_on_destroy\")",
"async def delete(self, obj: Obj):\n if self.ref_counts[obj] == 1:\n del self.ref_counts[obj]\n await self.deinit(obj)\n else:\n self.ref_counts[obj] -= 1",
"def setDestroy(self):\n self.destroy = True",
"def DestroyTimer(self, obj, event):\n return 1",
"def delete(self) -> bool:\n return False",
"def destroy(self):\r\n self._obj.destroy()\r\n self._obj = None",
"def delete(self, bucket, object, generation=None):\n service = self.get_conn()\n\n try:\n service \\\n .objects() \\\n .delete(bucket=bucket, object=object, generation=generation) \\\n .execute()\n return True\n except errors.HttpError as ex:\n if ex.resp['status'] == '404':\n return False\n raise",
"def watchpoint_clear(self, handle):\n return not self._dll.JLINKARM_ClrDataEvent(handle)",
"def destroy():\n pass"
]
| [
"0.62754756",
"0.5955956",
"0.5868036",
"0.5828437",
"0.5735238",
"0.5553308",
"0.5533823",
"0.5523336",
"0.5507208",
"0.5486196",
"0.5424214",
"0.5418747",
"0.5418747",
"0.5361335",
"0.5355246",
"0.5303378",
"0.52623045",
"0.5161526",
"0.5125425",
"0.5107519",
"0.5049215",
"0.5049215",
"0.50444067",
"0.5023062",
"0.5015976",
"0.4981075",
"0.4971799",
"0.4968667",
"0.49480125",
"0.49361795"
]
| 0.5982804 | 1 |
Initializes the values dictionary for the specified section. | def __init__(self, section):
self.values = section | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _fill_section_dict(self, dictionary, section, keys=[], streams=1):\n\t\t# if there is no stream, we just place a note in the dictionary\n\t\tif streams == 0:\n\t\t\tdictionary[section] = 'No stream'\n\t\t\treturn dictionary\n\n\t\t# otherwise we have to fill one subdictionary per stream\t\t\n\t\tfor i in range(streams):\n\t\t\tdictionary[section][i] = {}\n\t\t\t# lookup the information for every key requested and fill the dictionary accordingly\n\t\t\tfor key in keys:\n\t\t\t\tif (section + '_' + str(i) + '_' + key) in self.full_info.keys():\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdictionary[section][i][key] = int(self.full_info[section + '_' + str(i) + '_' + key])\n\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tdictionary[section][i][key] = float(self.full_info[section + '_' + str(i) + '_' + key])\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tdictionary[section][i][key] = self.full_info[section + '_' + str(i) + '_' + key]\n\t\t\n\t\treturn dictionary",
"def to_dict(self, section):\n\t\t\n\t\tdct = {}\n\t\t\n\t\tfor name, value in self.items(section):\n\t\t\tdct[name] = self.parse_value(value)\n\t\t\n\t\treturn dct",
"def _create_sections(self):\n self._SECTIONS = {}",
"def __init__(self):\n self.vals = {}",
"def __init__(self, section, diskDevice):\n # Section option names are generated\n self.sectionOptions = []\n self.diskDevice = diskDevice\n\n # Grab our partition map\n for part in section.Partition:\n # Build device + slice + partition number, and append it to\n # sectionOptions\n slice = self.diskDevice + '-' + part.getSectionName()\n self.sectionOptions.append(slice)\n\n # Partition settings\n if (part.softupdates):\n setattr(self, slice, \"%s %d %s 1\" % (part.type, part.size, part.mount))\n else:\n setattr(self, slice, \"%s %d %s\" % (part.type, part.size, part.mount))\n\n # Ensure that partitions are in order (1 ... 9)\n self.sectionOptions.sort()",
"def set_section_data(self, section_name, value):\n section_name = JSONSchema.format_section_name(section_name)\n self._sections[section_name] = self._json_schema.check_section_value(section_name, value)\n return True",
"def set_sections(self, sections):\n assert type(sections) == dict\n self._sections = sections",
"def parse(self, section_dict):\n self.dict = section_dict\n for option in section_dict:\n if option not in self.optionnames:\n print(\"Warning: Unknown option: {:s} in section {:s}\".format(\n option, self.name), file=sys.stderr\n )\n for option, name in zip(self.options, self.optionnames):\n self.dict[name] = option.parse(self)\n return self.dict",
"def initializeFromDict(self, inputDict):\n pass",
"def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n for tree in self.train_trees:\n for t_lemma, formeme in tree.nodes:\n if t_lemma not in self.dict_t_lemma:\n self.dict_t_lemma[t_lemma] = dict_ord\n dict_ord += 1\n if formeme not in self.dict_formeme:\n self.dict_formeme[formeme] = dict_ord\n dict_ord += 1\n\n self.dict_size = dict_ord",
"def __init_values(self, values):\n for name, value in list(values.items()):\n if name in initializable_parameters:\n setattr(self, name, value)",
"def _get_section_data(self, section):\n \n # unit number\n apt_name = section['name']\n \n try:\n # get the number of bedrooms and bathrooms based \n # on the specific section dictionary\n bedrooms_text = section['bedrooms']['fullValue']\n bathrooms_text = section['bathrooms']['fullValue']\n bedrooms = self._extract_num(bedrooms_text)\n bathrooms = self._extract_num(bathrooms_text)\n except:\n bedrooms, bathrooms = np.nan, np.nan\n\n try:\n # get the square foot area of the unit \n space = float(section['floorSpace']['max'])\n except:\n space = np.nan\n\n try:\n # get the rent price of the unit \n price_text = section['priceRange']['formattedPrice']\n price_text = price_text.replace(',', '') \\\n .replace('$', '')\n price = self._extract_num(price_text)\n except:\n price = np.nan\n \n # construct the section data\n section_data = [\n apt_name,\n bedrooms,\n bathrooms,\n space,\n price,\n ]\n \n return section_data",
"def initialize(self):\n\n for i, item in enumerate(self.v.items()):\n state, value = item\n if value == None:\n raise ValueError, \"state '%s' has no value\" % state\n self.S[i]=value\n self.storage=Storage()",
"def load(value_dict: dict, value_count_dict: dict, \\\n Unit_Analyzer=None, align_dict={}, main_unit_dict={}):\n self._value_dict = value_dict\n self._initialize()\n self._Unit_Analyzer = Unit_Analyzer\n self._align_dict = align_dict\n self._main_unit_dict = main_unit_dict",
"def _add_setting(self, section, setting, value):\n# section = section.lower()\n if section not in self.keys():\n self[section] = dict()\n self.logger.debug(\"Adding %s.%s: %s\" % (section, setting, value))\n self[section][setting] = value",
"def initialize(self, runInfo, inputs, initDict) :\n super().initialize(runInfo, inputs, initDict)\n for metricIn in self.assemblerDict['Metric']:\n self.metricsDict[metricIn[2]] = metricIn[3]",
"def load(value_count_dict: dict, \\\n Unit_Analyzer=None, align_dict={}, main_unit_dict={}, value_dict: dict = {}):\n self._value_count_dict = value_count_dict\n self._initialize()\n self._value_dict = value_dict\n self._Unit_Analyzer = Unit_Analyzer\n self._align_dict = align_dict\n self._main_unit_dict = main_unit_dict",
"def initialize(self,inputDict):\n pass",
"def fill_import_section():\n section = _SectionData(\"Import\")\n section.props.append((\"ImportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.import_scale)))\n section.props.append((\"PreservePathForExport\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_preserve_path_for_export))))\n section.props.append((\"ImportPimFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pim_file))))\n section.props.append((\"UseWelding\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_use_welding))))\n section.props.append((\"WeldingPrecision\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_welding_precision))))\n section.props.append((\"UseNormals\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_use_normals))))\n section.props.append((\"ImportPitFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pit_file))))\n section.props.append((\"LoadTextures\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_load_textures))))\n section.props.append((\"ImportPicFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pic_file))))\n section.props.append((\"ImportPipFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pip_file))))\n section.props.append((\"ImportPisFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pis_file))))\n section.props.append((\"ConnectedBones\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_connected_bones))))\n section.props.append((\"BoneImportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.import_bone_scale)))\n section.props.append((\"ImportPiaFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pia_file))))\n section.props.append((\"IncludeSubdirsForPia\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_include_subdirs_for_pia))))\n return section",
"def set(self, section, option, value):\n if not self._dict.has_key(section):\n self._dict[section] = {}\n \n self._dict[section][option] = value",
"def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()",
"def _read_section_config(self, req, section_name, default_values, custom_options = None):\n def _assemble_option(option_name, stored_value):\n option = self._gather_option_data(req, section_name, option_name, section_default_values)\n stored_value = self._convert_value(stored_value, option['option_info'])\n\n does_exist, value = self._get_session_value(req, section_name, option_name)\n if does_exist:\n option['value'] = value\n else:\n option['value'] = stored_value\n \n option['stored_value'] = stored_value\n return option\n \n options = {}\n section_default_values = default_values.get(section_name, None)\n\n for option_name, stored_value in self.config.options(section_name):\n options[option_name] = _assemble_option(option_name, stored_value)\n \n if custom_options is None:\n custom_options = self._get_session_custom_options(req, section_name)\n \n if section_name in custom_options:\n for option_name in custom_options[section_name].keys():\n if option_name in options:\n continue\n \n options[option_name] = _assemble_option(option_name, None)\n \n return options",
"def __init__(self, sections):\n self._ptr = W.config_new(\n SCRIPT_NAME, SCRIPT_NAME + \"_config_reload_cb\", \"\"\n )\n\n for section in sections:\n name, options = section\n section_class = ConfigSection.build(name, options)\n setattr(self, name, section_class(name, self._ptr, options))",
"def initDictionnary(self):\n partitions = self.vocabulary.getPartitions()\n for partition in partitions:\n for mod in partition.modalities:\n self.summaryDict[partition.getAttName() + \" : \" + mod] = 0.0\n self.summaryFilteredDict[partition.getAttName() + \" : \" + mod] = 0.0",
"def _write_section_values(section_data, fobj):\n\n # Order is significant.\n section_dict = OrderedDict()\n section_dict['Armor'] = section_data.get('armor')\n section_dict['Internals'] = section_data.get('internals')\n section_dict['Rear'] = section_data.get('rear')\n section_dict['Config'] = section_data.get('config')\n\n for name, value in section_dict.items():\n if not value:\n continue\n val_str = \" {name:<14} {{ {value} }}\\n\".format(\n name=name, value=value)\n fobj.write(val_str)",
"def getOptionsDict(self, section):\n answer = {}\n for option in self.getOptions(section):\n answer[option] = self.get(section, option)\n return answer",
"def initializeFromDict(self, inputDict):\n for idx, val in enumerate(inputDict['outcome']):\n self.mapping[val] = inputDict['state'][idx]\n self.values.add(val)\n\n self.checkDistParams()",
"def add_section(self, section):\n if self.has_section(section):\n raise DuplicateSectionError(section)\n self._dict[section] = {}",
"def __init__(self):\n self.sections = {} # type: Dict[str, NetSection]",
"def parse_section(section):\n data = {}\n for line in section.splitlines(False):\n if not line:\n continue\n if not line.startswith(' '):\n # new key/value\n key, _, value = line.partition(': ')\n data[key] = value\n else:\n # continuation of the previous value\n data[key] += line[1:]\n return data"
]
| [
"0.64017636",
"0.6107499",
"0.58912754",
"0.57488894",
"0.56328344",
"0.5587617",
"0.5584594",
"0.54791665",
"0.544328",
"0.53737956",
"0.5343547",
"0.5333064",
"0.5323612",
"0.531561",
"0.5300138",
"0.52883494",
"0.5287598",
"0.52874094",
"0.5277978",
"0.52705735",
"0.52675694",
"0.52608806",
"0.5245739",
"0.52123284",
"0.52108485",
"0.5179277",
"0.517379",
"0.5172575",
"0.5155585",
"0.5152546"
]
| 0.7376099 | 0 |
The correct class method to obtain the singleton instance of Config. | def get_instance(cls):
if not cls._instance:
cls._instance = Config()
return cls._instance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_config(cls) -> \"__Config\":\n if cls.__instance is None:\n cls.__instance = cls.__Config()\n return cls.__instance",
"def config():\n return Config()",
"def config():\n return Config()",
"def get_instance() -> Config:\n if not Config.__instance__:\n obj = Config()\n log.info(\"Configuration Found\")\n log.info(\"{:<35}{:<40}\".format(\"--KEY--\", \"--VALUE--\"))\n for key in sorted(list(templated)):\n log.info(\n \"{:<35}{:<40}\".format(\n key, str(getattr(obj, key)) if getattr(obj, key) else \"\"\n )\n )\n log.info(\"Configuration Finished\")\n return obj\n return Config.__instance__",
"def get_instance(app: Sanic = None):\n if Configs.__instance is None:\n Configs(app)\n return Configs.__instance",
"def config() -> Config:\n return Config()",
"def get_config(cls):\n if not cls.config:\n import aha\n cls.config = aha.Config()\n return cls.config",
"def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(Config, cls).__new__(cls)\n return cls._instance",
"def get_config() -> Config:\n return get_or_set('config', build_configuration)",
"def get(cls):\n\t\tif cls.__ConfigParser is None:\n\t\t\traise RuntimeError('Enter the Config context first.')\n\t\treturn cls.__ConfigParser",
"def Get():\n return ServiceConfig() # Singleton decorator ensures there's only one",
"def get_config(cls) -> GlobalConfig:\n if cls.config is None:\n raise ValueError(\"Config is not loaded\")\n return cls.config",
"def get_config() -> Config:\n app_config = os.environ.get('APP_CONFIG', 'ProductionConfig')\n config_module = importlib.import_module(\n '.'.join(\n f\"magma.fluentd_client.config.{app_config}\".split('.')[\n :-1\n ],\n ),\n )\n config_class = getattr(config_module, app_config.split('.')[-1])\n return config_class()",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config"
]
| [
"0.86490566",
"0.78078085",
"0.78078085",
"0.77745426",
"0.7647158",
"0.7623508",
"0.7582533",
"0.74804014",
"0.74308735",
"0.73483384",
"0.7274086",
"0.71842724",
"0.71819735",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284",
"0.71663284"
]
| 0.81780726 | 1 |
Resolves/Returns a value from a specified section and a key. | def resolve(self, section, key):
return self.sections[section][key] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_value(self, in_section=str(), in_key=str()) -> Any:\n if in_section in self.__return_map.keys():\n if in_key in self.__return_map.get(in_section).keys():\n return self.__return_map.get(in_section).get(in_key)\n\n return None",
"def __getitem__(self, key):\n for section in self.header.values():\n for ckey, val in section.items():\n if ckey == key:\n return val\n else:\n raise ValueError",
"def __getitem__(self, section):\n result = self.get(section)\n\n if result is None:\n raise KeyError(section)\n\n return result",
"def get(self,section,key):\n value = ConfigParser.get(self,section,key)\n if value.startswith('\"') or value.startswith(\"'\"):\n return value\n if re.search(r\":\",value):\n out_dict = {}\n pieces = valuesplit(\",\")\n for piece in pieces:\n key,v = piece.split(\":\")\n out_dict[key] = translate(v)\n return out_dict\n elif re.search(\",\",value):\n values = value.split(\",\")\n return [translate(v) for v in values]\n return translate(value)",
"def resolve(self, key: str) -> Optional[Any]:\n return self.dict.get(key)",
"def _get_config_value(self, section, key):\n return config.get(section, key)",
"def safe_get(self,section,key,default_value=None):\n try:\n return self.get(section,key)\n except:\n return default_value",
"def lookup(self, key):\n n = self.find(key)\n if n:\n return n.value\n else:\n return False",
"def lookup(self, key):",
"def resolve(self, key: str) -> Optional[Any]:\n pass",
"def get(section, key, inifile=\"settings.ini\"):\n config = ConfigParser.RawConfigParser()\n config.readfp(open(inifile))\n value = config.get(section, key)\n return value",
"def get(self, section, option, raw=False, vars=None):\n sectiondict = {}\n try:\n sectiondict = self._sections[section]\n except KeyError:\n if section != DEFAULTSECT:\n raise NoSectionError(section)\n # Update with the entry specific variables\n vardict = {}\n if vars:\n for key, value in vars.items():\n vardict[self.optionxform(key)] = value\n d = _Chainmap(vardict, sectiondict, self._defaults)\n option = self.optionxform(option)\n try:\n value = d[option]\n except KeyError:\n raise NoOptionError(option, section)\n\n if raw or value is None:\n return value\n else:\n return self._interpolate(section, option, value, d)",
"def get_safe(self, section, key, default=None):\n try:\n return self.get(section, key)\n except (NoSectionError, NoOptionError):\n return default",
"def lookup(self, key):\n k = self.get_position(key)\n\n if self.keys[k] == key:\n return node.values[k]\n\n # Lookup in the child node.\n if self.refs[k+1] == None:\n return None\n return self.refs[k+1].lookup(key)",
"def find_value(dic, key):\n return dic[key]",
"def GetValue(self, key):\n return self._metadata_dict[key]",
"def _search_and_replace(parser, section):\n INTERPOLATION_RE = re.compile(r\"\\$\\{(?:(?P<section>[^:]+):)?(?P<key>[^}]+)\\}\")\n result = []\n def interpolate_func(match):\n d = match.groupdict()\n s = d.get('section')\n if s is None:\n s = section\n key = d.get('key')\n return parser.get(s, key)\n\n for key, value in parser.items(section):\n value = re.sub(INTERPOLATION_RE, interpolate_func, value)\n result.append(\n (key,value)\n )\n return result",
"def _map___getitem__(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of key should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if key not in self:\n raise KeyError('key not found')\n return self.second(self.find(key))",
"def __getitem__(self, key):\n self.__check_key_validity(key)\n return self.data[key[0]][key[1]]",
"def _GetConfigValue(self, config_parser, section_name, value_name):\n try:\n return config_parser.get(section_name, value_name).decode('utf-8')\n except configparser.NoOptionError:\n return",
"def __getitem__(self, key):\n ndx = self._findPosition(key)\n assert ndx is not None, 'Invalid map key'\n return self._entryList[ndx].value",
"def _extract_by_key(self, line, key):\n search = r'{0}=.+?,'.format(key) # lazy match to first ,\n attr_match = re.search(search, line)\n if attr_match:\n # grab just the value of the attribute from attr_key=value,\n value = attr_match.group()[len(key) + 1 : len(attr_match.group()) - 1]\n return value\n else:\n return \"notfound\"",
"def __getitem__(self, key):\n return self.keyvaluepair_set.get(key=key).value",
"def _GetConfigValue(self, config_parser, section_name, value_name):\n try:\n return config_parser.get(section_name, value_name).decode('utf-8')\n except (configparser.NoOptionError, configparser.NoSectionError):\n return",
"def get(self, key):\n h = key%self.m\n a = self.a\n if a[h]:\n return a[h].val\n else:\n return -1",
"def get(self, key):\n if self.defs:\n name = self.defs[0]\n val = self.defs[1]\n old_self = self.defs[2]\n if key == name:\n return val\n else:\n return old_self.get(key)",
"def getSetting(self,section,key,default=None):\n section,key = map(bolt.LString,(section,key))\n settings = self.getSettings()\n if section in settings:\n return settings[section].get(key,default)\n else:\n return default",
"def __getitem__(self, key):\n if key in self._keys:\n # absolute name\n return self._values[key]\n\n elif key in self._auto_ivc_map:\n # We allow the user to query with auto_ivc varname.\n src_key = self._auto_ivc_map[key]\n if src_key in self._keys:\n return self._values[self._auto_ivc_map[key]]\n\n elif key in self:\n # promoted name\n val = super().__getitem__(key)\n if val is _AMBIGOUS_PROM_NAME:\n msg = \"The promoted name '%s' is invalid because it refers to multiple \" + \\\n \"inputs: %s. Access the value using an absolute path name or the \" + \\\n \"connected output variable instead.\"\n raise RuntimeError(msg % (key, str(self._prom2abs[key])))\n else:\n return val\n\n elif isinstance(key, tuple) or self._DERIV_KEY_SEP in key:\n # derivative keys can be either (of, wrt) or 'of!wrt'\n abs_keys, prom_key = self._deriv_keys(key)\n return super().__getitem__(prom_key)\n\n raise KeyError('Variable name \"%s\" not found.' % key)",
"def resolve(self, key: str) -> Any:\n return _ba.resolve_appconfig_value(key)",
"def Get(self, section, var):\n return self.cp.get(section, var)"
]
| [
"0.6781236",
"0.6736435",
"0.6483189",
"0.6275725",
"0.6275108",
"0.61450607",
"0.61202466",
"0.61098176",
"0.6060869",
"0.5912707",
"0.58981955",
"0.58779114",
"0.5876569",
"0.5857488",
"0.58354527",
"0.58303",
"0.5821721",
"0.577139",
"0.57196397",
"0.5710267",
"0.5710064",
"0.5699273",
"0.56899536",
"0.5684084",
"0.56738985",
"0.5667662",
"0.5659045",
"0.5654726",
"0.56526715",
"0.56479704"
]
| 0.8441994 | 0 |
Return a cfg dict containing a db prepopulated with 10 nearly identical factoids. The only difference is id. | def cfg_10_identical_factoids(mockcfg):
Factoid = mockcfg["db"].entities["Factoid"]
with orm.db_session():
for i in range(1, 101):
data = {
"@id": "F{:03d}".format(11 - i),
"createdBy": "Creator 1",
"createdWhen": datetime.datetime(2015, 1, 1).isoformat(),
"modifiedBy": "Modifier 1",
"modifiedWhen": datetime.datetime(2015, 1, 2).isoformat(),
"source": {'@id': 'Source 1'},
"person": {'@id': 'Person 1'},
"statements": [{'@id': 'Statement {:03d}'.format(i)}]
}
Factoid.create_from_ipif(data)
yield mockcfg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def config_db():",
"def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")",
"def get_old_db_details():\n db_conf_prelim = {\n 'host': None,\n 'port': None,\n 'database': None,\n 'username': None,\n 'password': None }\n\n db_conf = codecs.open(\"db.conf\", encoding=\"utf-8\", mode=\"r\")\n db_conf_text = db_conf.read()\n db_conf.close()\n\n prev_host = search_config(\"metastore.connection-postgresql.host\", \"host\", db_conf_text)\n prev_port = search_config(\"metastore.connection-postgresql.port\", \"port\", db_conf_text)\n prev_database = search_config(\"metastore.connection-postgresql.database\", \"database\", db_conf_text)\n prev_username = search_config(\"metastore.connection-postgresql.username\", \"username\", db_conf_text)\n prev_password = search_config(\"metastore.connection-postgresql.password\", \"password\", db_conf_text)\n db_conf_from_file = {\n 'host': prev_host if prev_host else None,\n 'port': prev_port if prev_port else None,\n 'database': prev_database if prev_database else None,\n 'username': prev_username if prev_username else None,\n 'password': prev_password if prev_password else None }\n\n db_conf_prelim.update(db_conf_from_file)\n return db_conf_prelim",
"def get_config():\n CONFIG.clear() #clear config\n sql=\"SELECT * FROM config\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n results=c.fetchall()\n # iterate through the results now...\n for r in results:\n CONFIG[r[1]]=r[2]\n conn.commit()\n conn.close()",
"def get_config_db():\n\n datab = {'db_name': 'database_name',\n 'db_url': 'database_url'}\n\n return datab",
"def produce_all_database(is_debug):\n\tproduce_database([\"apnea-ecg\", \"train\"], is_debug)\n\tproduce_database([\"apnea-ecg\", \"test\"], is_debug)",
"def init_table_config_data():\n config_data = {}\n config_data[\"channels\"] = []\n config_data[\"channels_data\"] = {}\n\n return config_data",
"def database(self,id):\n\t\tdb = {1:('COMPUTER',1000.5,100),\n \t\t 2:('MOUSE',10.0,300),\n\t\t 3:('PENCIL BLUE',0.50,500),\n\t\t 4:('PENCIL RED',0.50,600),\n\t\t 5:('PENCIL WHITE',0.50,900),\n\t\t 6:('HEADPHONES',15.7,500),\n\t\t }\n\t\trow = (None,0.0,0)\n\t\ttry:\n\t\t\trow = db[id]\n\t\texcept:\n\t\t\tNone\n\t\treturn row",
"def get_confdb(self, config=None, cleanup=True):\n profile = self.profile.get_profile()\n e = Engine()\n # Insert defaults\n defaults = profile.get_confdb_defaults(self)\n if defaults:\n e.insert_bulk(defaults)\n # Get working config\n if config is None:\n config = self.config.read()\n # Insert raw section\n if self.get_confdb_raw_policy() == \"E\":\n e.insert_bulk((\"raw\",) + t for t in self.iter_config_tokens(config))\n # Parse and normalize config\n e.insert_bulk(self.iter_normalized_tokens(config))\n # Apply applicators\n for applicator in profile.iter_config_applicators(self, e):\n applicator.apply()\n # Remove temporary nodes\n if cleanup:\n e.cleanup()\n return e",
"def read_db():\n f_result = []\n result = execute_query('select sitename, id from {} order by sitename;'.format(TABLES[0]))\n sites = [(x['sitename'], x['id']) for x in result]\n for sitename, site_id in sites:\n sitedict = {'name': sitename}\n querystring = 'select settname, settval from {} order by settname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[1]), (site_id,))\n sitedict['settings'] = {x: y for x, y in cur.fetchall()}\n querystring = 'select dirname, id from {} order by dirname where site_id = %s;'\n result = execute_query(querystring.format(TABLES[2]), (site_id,))\n sitedirs = [(x['dirname'], x['id']) for x in cur.fetchall()]\n sitedict['docs'] = []\n # if we keep the site_id in the docstats table we could restrict this to one db-query\n # and filter the result set inside the loop\n # although this should also be possible with a subselect or something like that\n for dirname, dir_id in sitedirs:\n dirlist = []\n querystring = 'select * from {} order by docname where dir_id = %s;'\n result = execute_query(querystring.format(TABLES[3]), (dir_id,))\n for resultdict in cur:\n resultdict['dirname'] = dirname\n dirlist.append(resultdict)\n sitedict['docs'].append(dirlist)\n f_result.append(sitedict)\n return f_result",
"def createConfig(couchDBName):\n\n PSetTweak = {'process': {'outputModules_': ['RECOoutput', 'ALCARECOoutput'],\n 'RECOoutput': {'dataset': {'dataTier': 'RECO',\n 'filterName': 'Filter'}},\n 'ALCARECOoutput': {'dataset': {'dataTier': 'ALCARECO',\n 'filterName': 'AlcaFilter'}}}}\n\n configCache = ConfigCache(os.environ[\"COUCHURL\"], couchDBName = couchDBName)\n configCache.createUserGroup(groupname = \"testGroup\", username = 'testOps')\n configCache.setPSetTweaks(PSetTweak = PSetTweak)\n configCache.save()\n\n return configCache.getCouchID()",
"def get_base_strategies():\n db_connect()\n query = \"SELECT * FROM base_strategies\"\n base_strats_df = pd.read_sql(query, con=db)\n db_close()\n\n base_strats = {}\n for i in xrange(len(base_strats_df.index)):\n base_strats[base_strats_df.iloc[i]['id']] = {}\n base_strats[base_strats_df.iloc[i]['id']]['name'] = base_strats_df.iloc[i]['base_strategy']\n base_strats[base_strats_df.iloc[i]['id']]['description'] = base_strats_df.iloc[i]['description']\n \n return base_strats",
"def prep_database(sqla):\n create_multiple_people(sqla, random.randint(5, 15))\n create_multiple_accounts(sqla)\n return [account.id for account in sqla.query(Account.id).all()]",
"def indsk_dbparams(db_dir, sel):\n dbparam1 = {'alias': \"DB1\",\n\n # `target_size` describes the original data size.\n # Used when reading data at db_dir via core iterators, but not `DskmanDskDataIterator` iterators.\n # Exception: Used in `DskmanDskBigDataIterator` when reading binary files.\n 'dskdb_param': {'db_dir': db_dir, 'target_size': (33, 33)},\n 'selection': sel}\n return dbparam1",
"def dump_config():\n print \"open db file %s for reading\" % config_file\n #config_db = dbm.open(config_file, 'c')\n #config_db.close()\n #with dbm.open(config_file, 'c') as db:\n db = dbm.open(config_file, 'c')\n for key in db.keys():\n print \"%s : %s\" % (key.decode(), db[key].decode())\n db.close()",
"def inmem_dbparams(nndb, sel):\n dbparam1 = {'alias': \"DB1\",\n 'memdb_param': {'nndb': nndb},\n 'selection': sel}\n return dbparam1",
"def _generate_schema(self):\n\n response = self._request('GET', CosmoSim.SCHEMA_URL,\n auth=(self.username, self.password),\n headers={'Accept': 'application/json'},\n cache=False)\n data = response.json()\n self.db_dict = {}\n for i in range(len(data['databases'])):\n self.db_dict[str(data['databases'][i]['name'])] = {}\n\n sstr = str(data['databases'][i]['name'])\n sid = str(data['databases'][i]['id'])\n self.db_dict[sstr]['id'] = sid\n sdesc = str(data['databases'][i]['description'])\n self.db_dict[sstr]['description'] = sdesc\n self.db_dict[sstr]['tables'] = {}\n for j in range(len(data['databases'][i]['tables'])):\n sstr2 = str(data['databases'][i]['tables'][j]['name'])\n self.db_dict[sstr]['tables'][sstr2] = {}\n sdata = data['databases'][i]['tables'][j]['id']\n self.db_dict[sstr]['tables'][sstr2]['id'] = sdata\n sdesc2 = data['databases'][i]['tables'][j]['description']\n self.db_dict[sstr]['tables'][sstr2]['description'] = sdesc2\n self.db_dict[sstr]['tables'][sstr2]['columns'] = {}\n tmpval = len(data['databases'][i]['tables'][j]['columns'])\n for k in range(tmpval):\n sdata2 = data['databases'][i]['tables'][j]['columns'][k]\n sdata2_id = sdata2['id']\n sstr3 = str(sdata2['name'])\n\n sdesc3 = sdata2['description']\n self.db_dict[sstr]['tables'][sstr2]['columns'][sstr3] = {\n 'id': sdata2_id,\n 'description': sdesc3}\n return response",
"def setup_database():\n database = {}\n\n # for filename in glob.glob(os.path.join(IMAGES_PATH, '')):\n for root, dirs, files in os.walk(IMAGES_PATH):\n for file in files:\n filename = os.path.join(IMAGES_PATH, file)\n # Load image\n image_rgb = face_recognition.load_image_file(filename)\n\n # Use name in filename as the identity key\n identity = os.path.splitext(os.path.basename(filename))[0]\n\n # Get face encoding and link it to the identity\n locations, encodings = get_face_embeddings_from_image(image_rgb)\n database[identity] = encodings[0]\n\n return database",
"def setup_sample_data(no_of_records):\n rows_in_database = [{'id': counter, 'name': get_random_string(string.ascii_lowercase, 20), 'dt': '2017-05-03'}\n for counter in range(0, no_of_records)]\n return rows_in_database",
"def get_random_db(self):\n rnd = random.random() * self.totals[-1]\n pool_index = bisect.bisect_right(self.totals, rnd)\n return list(self.pool)[pool_index]",
"def getdbconfig(runmode, dbmodelname):\r\n #print('getdbcofig : ' + runmode + \" \" + dbmodelname)\r\n try:\r\n path = os.path.split(os.path.realpath(__file__))[0]\r\n #print 'db.xml path: ' + path + \"/config/db.xml\"\r\n if (runmode != \"test\"):\r\n dbjsonfile = open(path + '/config/' + runmode + '/db.json', 'r')\r\n dbjson = json.load(dbjsonfile)\r\n #print('config file path : ' + path + \"/config/\" + runmode + \"/db.json\")\r\n else:\r\n dbjsonfile = open(path + '/config/db.json', 'r')\r\n dbjson = json.load(dbjsonfile)\r\n #print('config file path : ' + path + \"/config/db.json\")\r\n \r\n \r\n #print('from dbconfig file get : dbjson' + str(dbjson))\r\n for db in dbjson:\r\n #print db.getAttribute('id')\r\n if db['dbmodelname'] == dbmodelname:\r\n dbuser = db['userid']\r\n dbpasswd = db['password']\r\n dburl = db['dburl']\r\n dburlport = db['dburlport']\r\n dbname = db['dbname']\r\n break\r\n return (dbuser, dbpasswd, dburl, dburlport, dbname)\r\n except Exception as e:\r\n return ('except', str(e), '', '', '')",
"def db_10000(\n empty_graph_db: graph_tuple_database.Database,\n) -> graph_tuple_database.Database:\n # Generate some random graph tuples.\n graph_pool = [\n random_graph_tuple_database_generator.CreateRandomGraphTuple()\n for _ in range(128)\n ]\n\n # Generate a full list of graphs by randomly selecting from the graph pool.\n random_graph_tuples: List[graph_tuple_database.GraphTuple] = [\n copy.deepcopy(random.choice(graph_pool)) for _ in range(10000)\n ]\n # Index the random graphs by ir_id.\n for i, t in enumerate(random_graph_tuples):\n t.ir_id = i\n t.data_flow_steps = i\n\n with empty_graph_db.Session(commit=True) as s:\n s.add_all(random_graph_tuples)\n # Create the empty graph tuples. These should be ignored by the graph\n # reader.\n s.add_all(\n [\n graph_tuple_database.GraphTuple.CreateEmpty(0),\n graph_tuple_database.GraphTuple.CreateEmpty(0),\n ]\n )\n\n return empty_graph_db",
"def all_measurements_lookup(client):\n dbs_dict = db_lookup(client)\n m_list_dict = []\n for db in dbs_dict:\n m_list_dict.append({db['name']: measurements_lookup(client, db['name'])})\n # print(\"def all_measurements_lookup 1: \", m_list_dict[:10])\n return m_list_dict",
"def populate_db(dbase):\n # In this order: Iron, Blood, Shadow, Fel, Storm\n wowhead_ids = []\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-8))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-9))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-10))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-11))\n wowhead_ids.extend(item.get_ids_from_wowhead_by_type(-17))\n item_ids = set(wowhead_ids)\n print(item_ids)\n\n pos = 0\n for item_id in item_ids:\n if pos % 10 == 0:\n print(\"Relic %d of %d\" % (pos, len(item_ids)))\n pos += 1\n import_relic(dbase, item_id)",
"def getDetectorMapSpecs(\n dbname: str, nameprefix: str, criteria: SelectionCriteria,\n config: ConfigOverrides, *, maxarcs: int) -> List[Dict[str, Any]]:\n blocks = []\n for beamConfig in sorted(getBeamConfigs([\"scienceArc\"], dbname, criteria)):\n for arm in all_arms:\n sources = getSourcesFromDB(\"scienceArc\", arm, dbname, criteria, beamConfig=beamConfig)\n for srcs in splitSources(sources, maxarcs):\n calibBlock: Dict[str, Any] = {}\n\n if sources:\n calibBlock[\"detectorMap\"] = {\n \"id\": getSourceFilterFromListOfFileId(srcs)\n }\n calibBlock[\"detectorMap\"].update(config.toYaml(\"detectorMap\"))\n\n if calibBlock:\n # This name is not unique\n # but a serial number will be added to it\n # after a merge process.\n blocks.append(nameYamlMapping(f\"{nameprefix}{arm}\", calibBlock))\n\n return addSerialNumbersToNames(mergeCalibBlocks(blocks))",
"def generate_db(seed, c, e, s, st, sr, v):\n\n r.seed(seed) #initialize the random number generator with the seed\n sales_db = {} #make an empty dictionary\n\n for i in range(0,v):\n #populate a single record\n new_rec = {'city': r.sample(c, 1)[0], #draw a city\n 'employee' : r.sample(e, 1)[0], #draw a person\n 'sales' : round(r.triangular(s[0],s[1])), #draw a sale value\n 'stay' : round(r.uniform(st[0],st[1])) #how long did it take to close the deal\n }\n if r.random() > sr: #deal fell through\n new_rec['sales'] = 0\n\n #add record to db\n sales_db[i] = new_rec\n\n return sales_db",
"def generate_database():\n database = {}\n\n for name in images:\n descriptors = []\n for path in images[name]:\n descriptors.append(ConvertToDescriptor.jpeg_to_descriptors(myPath + path))\n database[name] = person.Person(name, descriptors)\n\n output = open('database.p', 'wb')\n pickle.dump(database, output)\n output.close()",
"def check_db_matches():\n FIRST_RUN = False\n #ALL_FILE = \"all_queries_big\"\n #DB_FILE = \"all_dbs_big\"\n ALL_FILE = \"all_queries\"\n DB_FILE = \"all_dbs\"\n START_FROM = \"number\"\n ALL_NUM = \"all_num_from_new\"\n ALL_NUM = \"all_num_from_4_5_full_17\"\n\n ALL_FIXED_q = \"all_fixed_queries\" + str(17)\n ALL_FIXED_dbs = \"all_fixed_dbs\" + str(17)\n biggest = 20\n max_db_size = 20\n all_queries = {}\n db = [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}]\n found = [0] * biggest\n ret_val = []\n if FIRST_RUN:\n #raw_input(\"are you sure you want to rewrite the db?!\")\n storage_main = FileStorage(INDEX_DIR_CODE)\n ix_main = storage_main.open_index()\n try:\n \"\"\"\n with open(START_FROM, \"rb\") as file_h:\n (curr_db, count, db_sizes) = pickle.load(file_h)\n with open(ALL_FIXED_q, \"rb\") as file_h:\n all_queries = pickle.load(file_h)\n with open(ALL_FIXED_dbs, \"rb\") as file_h:\n db = pickle.load(file_h)\n print len(all_queries.keys())\n print \"Real size\", [len(e.keys()) for e in db]\n print \"left\", db_sizes\n print curr_db, count\n \"\"\"\n with open(START_FROM, \"rb\") as file_h:\n (curr_db, count, db_sizes) = pickle.load(file_h)\n print \"read\", curr_db, count\n with open(ALL_FILE+str(curr_db - 1), \"rb\") as file_h:\n all_queries = pickle.load(file_h)\n with open(DB_FILE+str(curr_db - 1), \"rb\") as file_h:\n db = pickle.load(file_h)\n print \"Real size\", [len(e.keys()) for e in db]\n except:\n curr_db = 0\n count = 0\n db_sizes = [2 ** i for i in range(1, biggest + 1)]\n new_count = 0\n print \"start reading posts\"\n q_db = POSTS_DB.find({}, timeout=False)\n print \"done reading posts\"\n print \"start with\", curr_db\n for question in q_db:\n if curr_db == max_db_size:\n print \"break\"\n break\n new_count += 1\n if new_count < count:\n continue\n if db_sizes[curr_db] % 1000 == 0:\n print \"BUILD:\", curr_db, \"I'm Alive, more\", db_sizes[curr_db], \"togo!\"\n snips = get_possible_snippets(question['Id'])\n if snips is None or len(snips) == 0:\n continue\n (db[curr_db])[question['Id']] = snips[0]\n db_sizes = db_sizes[:curr_db] + [e-1 for e in db_sizes[curr_db:]]\n if db_sizes[curr_db] == 0:\n t = time.time()\n print \"find matches for\", curr_db, \"size is\", len(db[curr_db].keys())\n for place, key in enumerate(db[curr_db].keys()):\n if place % 1000 == 0:\n print \"FIND: I'm Alive\", place\n code = db[curr_db][key][0]\n res_dict, tokens, q_scores = fast_from_code_to_question(code, ix_main)\n if all_queries.get(key, None) is None:\n all_queries[key] = (tokens, res_dict)\n curr_db += 1\n try:\n print \"saved\", time.time() - t\n with open(ALL_FILE + str(curr_db), \"wb\") as file_h:\n pickle.dump(all_queries, file_h)\n with open(DB_FILE + str(curr_db), \"wb\") as file_h:\n pickle.dump(db, file_h)\n with open(START_FROM, \"wb\") as file_h:\n pickle.dump((curr_db, new_count, db_sizes), file_h)\n except:\n print \"to much to write\"\n print \"start\", 2 ** (curr_db + 1)\n q_db.close()\n num = 0\n else:\n print \"reading files..\"\n t = time.time()\n \"\"\"with open(ALL_FILE+str(max_db_size), \"rb\") as file_h:\n all_queries = pickle.load(file_h)\n with open(DB_FILE+str(max_db_size), \"rb\") as file_h:\n db = pickle.load(file_h)\"\"\"\n with open(ALL_FIXED_q, \"rb\") as file_h:\n all_queries = pickle.load(file_h)\n with open(ALL_FIXED_dbs, \"rb\") as file_h:\n db = pickle.load(file_h)\n print \"done reading\", time.time() - t\n print [len(e.keys()) for e in db]\n\n try:\n with open(ALL_NUM, \"rb\") as file_h:\n num, found = 
pickle.load(file_h)\n print \"read\", num, found\n except:\n num = 0\n\n curr_num = 0\n print num, len(all_queries.keys())\n for query in all_queries.keys():\n curr_num += 1\n if curr_num < num:\n continue\n if curr_num % 1000 == 0:\n print \"MATCHES: I'M Alive!\", curr_num, query\n\n matches = get_matches(query, all_queries[query])\n flag_f = False\n for match in matches:\n if flag_f:\n break\n for i in range(len(db)):\n if match in db[i].keys() and query in db[i].keys():\n found[i] += 1\n flag_f = True\n break\n\n if curr_num - 1 > num:\n with open(ALL_NUM, \"wb\") as file_h:\n pickle.dump((curr_num, found), file_h)\n print found\n \"\"\"\n #saved in _n\n small_db = [0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 8] # 3/5\n small_db = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 4] # 4/5\n for i, val in enumerate(small_db):\n try:\n found[i] += val\n except:\n print \"shorter db\"\n\n print found\"\"\"\n for i in range(len(found) - 1):\n found[i + 1] += found[i]\n print(found)\n for place, i in enumerate([2 ** i for i in range(1, max_db_size + 1)]):\n ret_val.append(float(found[place])/i * 100)\n print ret_val",
"def fix_db():\n ALL_FIXED_q = \"all_fixed_queries\"\n ALL_FIXED_dbs = \"all_fixed_dbs\"\n print \"reading files..\"\n with open(\"all_queries_up1111\", \"rb\") as file_h:\n all_queries11 = pickle.load(file_h)\n with open(\"all_dbs_up1111\", \"rb\") as file_h:\n dbs11 = pickle.load(file_h)\n print [len(e.keys()) for e in dbs11]\n with open(\"all_queries17\", \"rb\") as file_h:\n all_queries = pickle.load(file_h)\n with open(\"all_dbs17\", \"rb\") as file_h:\n dbs = pickle.load(file_h)\n\n print \"done reading..\"\n print \"all len1\", len(all_queries.keys())\n dbs = dbs11[0:11] + dbs[11:]\n\n print [len(e.keys()) for e in dbs]\n all_queries.update(all_queries11)\n print \"all len2\", len(all_queries.keys())\n print \"done update\"\n del all_queries11\n del dbs11\n print \"reading files 2..\"\n with open(\"all_queries16\", \"rb\") as file_h:\n all_queries16 = pickle.load(file_h)\n with open(\"all_dbs16\", \"rb\") as file_h:\n dbs16 = pickle.load(file_h)\n print \"done reading 2\"\n dbs = dbs[:15] + [dbs16[15]] + dbs[16:]\n print [len(e.keys()) for e in dbs]\n all_queries.update(all_queries16)\n print \"done update2\"\n print \"all len3\", len(all_queries.keys())\n with open(ALL_FIXED_q + str(17), \"wb\") as file_h:\n pickle.dump(all_queries, file_h)\n with open(ALL_FIXED_dbs + str(17), \"wb\") as file_h:\n pickle.dump(dbs, file_h)\n print \"saved!\"",
"def get_extra_couchdbs(config, couch_database_url):\n extra_dbs = {}\n for row in config:\n if isinstance(row, tuple):\n _, postfix = row\n extra_dbs[postfix] = '%s__%s' % (couch_database_url, postfix)\n\n return extra_dbs"
]
| [
"0.6318034",
"0.5625199",
"0.5595317",
"0.5581888",
"0.55571896",
"0.5533229",
"0.5471945",
"0.5468151",
"0.54543424",
"0.5430461",
"0.53637934",
"0.5325484",
"0.53159976",
"0.5307205",
"0.5294863",
"0.5281337",
"0.5266666",
"0.5261725",
"0.5257115",
"0.5245701",
"0.52363616",
"0.52309656",
"0.52179736",
"0.5204658",
"0.5187045",
"0.51860833",
"0.51782084",
"0.5175154",
"0.51749057",
"0.51556885"
]
| 0.67613614 | 0 |
Provides the reactive frontend | def frontend(request: HttpRequest) -> HttpResponse:
return render(request, "frontend/base.html", {}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self):\n self.render_front()",
"def ui(self):\n return ui",
"def main():\r\n return render_template(\"UI.html\")",
"def controls():\n return render_template(\"controls/controls.html\")",
"def get_frontend(self):\n servers = self.get_frontends()\n assert servers, \"starter: don't have instances!\"\n return servers[0]",
"def __call__(self):\n self.show()",
"def view(self):",
"def app(self) -> traits.RESTAware:",
"def app(self) -> traits.RESTAware:",
"def index(self):\n return self.html",
"async def view():\n\n components = await get_data()\n content = load.RELOADICON.format('/reloadstore')\n content += load.SEARCHBAR\n\n for component in components:\n cardtitle = ''\n\n if not components[component]['trackable']:\n continue\n\n if components[component]['has_update']:\n cardtitle += load.UPDATEICON\n\n cardtitle += component\n\n needs_migration = await migration_needed(component)\n\n if needs_migration:\n cardtitle += load.TOOLTIP.format('Migration needed')\n\n elif not components[component]['embedded']:\n cardtitle += load.TOOLTIP.format('Not managable')\n\n cardcontent = load.META.format(\n type='author', text=components[component]['author']['login'])\n\n cardcontent += load.TEXT.format(components[component]['description'])\n cardbutton = load.LINK.format(\n url='/component/'+component, target='_self',\n style='', id='', htmlclass='', extra='', text='More info')\n\n content += load.BUTTON_CARD.format(\n title=cardtitle, content=cardcontent, buttons=cardbutton)\n\n html = load.TOP\n html += load.BASE.format(content)\n html += load.END\n\n return html",
"def home():\n data = read_data()\n return render_template(\n 'index.html',\n title='Raft Message Implementation: Client',\n content=data\n )",
"def update_controller(self):",
"def main(self: object) -> None:\n print(\"[View] main\")\n self.mainloop()",
"def index():\n try:\n # Retrieve a list of active clients from the BancBox API for \n # the right side bar.\n active_clients = api.get_active_clients()\n except Exception, e:\n active_clients = []\n logger.error('Error retrieving active clients: %s', e)\n return render_template('index.html', active_clients=active_clients)",
"async def component_view(request):\n from componentstore.view.component.component import view\n requester = request.headers.get('X-FORWARDED-FOR', None)\n component = request.match_info['component']\n print(\"Serving view for\", component, \"to\", requester)\n html = await view(component)\n return web.Response(body=html, content_type=\"text/html\", charset=\"utf-8\")",
"def client():",
"def get(self):\n # Not done yet, cause lazy to do front\n return render_template('ask.html')",
"def web():\n from mephisto.client.server import app\n\n app.run(debug=False)",
"def serve(self) -> str:\n return self._render()",
"def run(self) -> None:\n self._render()\n print(self.sio.getvalue())",
"def index(self):\n return render(\"/derived/rock/index.mako\")",
"def main():\n frontend_query(PATH, USER)",
"def serve_vue_app():\n return(render_template('index.html'))",
"def server(self, reactor):\n raise NotImplementedError()",
"def show(self):",
"def dispatcher(self):\n pass # pragma: no cover",
"def application():\n\n return render_template(\"application-form.html\")",
"def get(self):\n return self.render_template('index.html', quote=PollHandler.quote)",
"def index():\n return render_template('form.html')"
]
| [
"0.60004795",
"0.58342993",
"0.5755755",
"0.56324667",
"0.5516678",
"0.55063325",
"0.5504352",
"0.5487491",
"0.5487491",
"0.54707456",
"0.5466322",
"0.5448678",
"0.5448638",
"0.5444742",
"0.5438025",
"0.54327214",
"0.5402335",
"0.53761554",
"0.5369238",
"0.5361649",
"0.5300719",
"0.5297367",
"0.529463",
"0.52924734",
"0.5280867",
"0.5273841",
"0.5268321",
"0.52589273",
"0.5258224",
"0.5254863"
]
| 0.6215078 | 0 |
whatever arguments g is supplied, pass them through to f | def g(*args, **kwargs):
print "*args :"
print args
print "**kwargs :"
print kwargs
return 2 * f(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def g():",
"def fG(self):\n pass",
"def both(f, g):\n def say(score0, score1):\n return both(f(score0, score1), g(score0, score1))\n return say",
"def f(a, b):",
"def doubler_correct(f):\n def g(*args, **kwargs):\n \"\"\"whatever arguments g is supplied, pass them through to f\"\"\"\n print \"*args :\" \n print args\n print \"**kwargs :\" \n print kwargs\n return 2 * f(*args, **kwargs)\n\n return g",
"def compose_expanded_args(f,g):\n def composed(*args):\n return f(*(g(*args)))\n\n return composed",
"def call(f):\n def g(*args, **kwds):\n return (f, args, kwds)\n return g",
"def f(inicio,obj):\n return g(inicio,obj)+h(inicio,obj)",
"def assemble_g(self, residuals, dofs, args, g):\n return self._build_g(residuals, dofs, args, g)",
"def _build_g(residuals, dofs, args, g):\n values = [res(*(arg[dofs_i] for arg in args)) for res, dofs_i in zip(residuals, dofs)]\n if len(values) != 0:\n g[:] = np.concatenate(values)\n return g",
"def f():",
"def f():",
"def __init__ (self, f, g):\n self.f = f\n self.g = g\n pass",
"def simple_back_line(x, p, alpha, rho, mu, f, g, args_f = [], args_g = []):\r\n i = 1\r\n if not args_f and not args_g: #if f and g are not function of any additional arguments\r\n while f(x + alpha*p) > (f(x) + mu*alpha*np.dot(g(x),p)):\r\n alpha = rho*alpha\r\n i = i + 1\r\n return alpha, i\r\n if args_f and not args_g: #if g is not a function of any additional arguments but f is\r\n while f((x + alpha*p), args_f) > (f(x, args_f) + mu*alpha*np.dot(g(x),p)):\r\n alpha = rho*alpha\r\n i = i + 1\r\n return alpha, i\r\n if not args_f and args_g: #if f is not a function of any additional arguments but g is\r\n while f(x + alpha*p) > (f(x) + mu*alpha*np.dot(g(x,args_g),p)):\r\n alpha = rho*alpha\r\n i = i + 1\r\n return alpha, i\r\n if args_f and args_g: #if both f and g are functions of additional arguments\r\n while f((x + alpha*p),args_f) > (f(x, args_f) + mu*alpha*g(x, args_g)):\r\n alpha = rho*alpha\r\n i = i + 1\r\n return alpha, i",
"def evaluate(self, g):\n pass",
"def f(self):\n return self.g() + self.h()",
"def add_fns(f_and_df, g_and_dg):\n \"*** YOUR CODE HERE ***\"",
"def f(x, a, b):\n return f_raw(x, a, b)",
"def compose(f, g):\n return lambda *args, **kwargs: f(g(*args, **kwargs))",
"def fvp(self, xs, gs, **kwargs):\n raise NotImplementedError",
"def execute(self, *f_args):\n selection = self._selections[int(math.floor(f_args[0]))].name()\n representation = self._representations[int(\n math.floor(f_args[1]))].name()\n mutation = self._mutations[int(math.floor(f_args[2]))].name()\n crossover = self._crossovers[int(math.floor(f_args[3]))].name()\n\n population = int(round(f_args[4]))\n selection_crossover = f_args[5]\n selection_mutation = f_args[6]\n generations = int(math.floor(f_args[7]))\n precision = int(round(f_args[8]))\n max_retry = int(round(f_args[9]))\n\n values = {}\n args = collections.namedtuple(\n \"args\",\n [\"precision\", \"threads\", \"dimensions\",\n \"selection\", \"representation\", \"crossover\", \"mutation\",\n \"population\", \"selection_mutation\", \"selection_crossover\",\n \"generations\", \"max_retry\"])\n\n for function_cls in self._functions:\n values[function_cls] = {}\n for dimension in range(1, 2):\n # prepare new alg\n alg = basic_ag.BaseAG(\n selection=selection,\n representation=representation,\n mutation=mutation,\n crossover=crossover,\n population=population,\n selection_crossover=selection_crossover,\n selection_mutation=selection_mutation,\n generations=generations,\n dimension=dimension,\n precision=precision)\n\n fabicrated_args = args(\n precision=precision, max_retry=max_retry,\n dimensions=dimension, threads=5,\n selection=selection,\n representation=representation,\n mutation=mutation,\n crossover=crossover,\n population=population,\n selection_crossover=selection_crossover,\n selection_mutation=selection_mutation,\n generations=generations)\n alg.set_args(fabicrated_args)\n\n function_cls.set_args(fabicrated_args)\n function = function_cls(dimension=dimension)\n\n rez = alg(function)\n info = alg.get_info()\n\n values[function_cls][dimension] = (\n rez, function.local_mins, info, fabicrated_args)\n\n return self._get_value(values)",
"def g(x, y=1):\r\n return x ** 2 + y",
"def evaluate(self, g):\n raise NotImplementedError",
"def compose1(f, g):\n def h(x):\n return f(g(x))\n return h",
"def g(f, x: float):\n return lambda x: f(x + f(x)) / f(x) - 1",
"def half_gcdex(f, g):\n lev, dom, per, F, G = f.unify(g)\n s, h = dmp_half_gcdex(F, G, dom)\n return per(s), per(h)",
"def mapg(f, C):\n for x in C:\n yield f(x)",
"def f_twoargs(self, arg1, arg2) :\n pass",
"def function(args):\n pass",
"def G(self, (k,t), (j,x), **params):\n return 0"
]
| [
"0.7128418",
"0.6589972",
"0.6545905",
"0.6483205",
"0.6384204",
"0.6329086",
"0.61682737",
"0.61640275",
"0.6125645",
"0.60679084",
"0.602055",
"0.602055",
"0.59499735",
"0.5904644",
"0.5897822",
"0.5890254",
"0.58226633",
"0.5797996",
"0.57459503",
"0.5721009",
"0.570922",
"0.570831",
"0.5673839",
"0.56669337",
"0.56312793",
"0.55579907",
"0.5545111",
"0.5534689",
"0.5525248",
"0.5521079"
]
| 0.7366032 | 0 |
Sets the number of rows, and columns, also initializes the list of squares cleans | def __init__(self, rows: int = 1, columns: int = 2):
super().__init__()
self.__squares = [[Floor._clean for i in range(columns)] for j in range(rows)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.rows = None\n self.columns = None\n self.squares = None\n # max is useful as a way to track range for iteration, and also as a way\n # to track the maximum number in any spot.\n self.max = 0",
"def __init__(self, size, given_cells):\n self.ROWS = string.ascii_uppercase[:size ** 2]\n self.COLS = [str(i) for i in range(1, size ** 2)]\n self.size = size\n self.given_cells = given_cells\n self.board = self.create_board()\n self.squares = [utility.cross(i, j) for i in [self.ROWS[i:i + size] for i in range(0, len(self.ROWS), size)]\n for j in [self.COLS[i:i + size] for i in range(0, len(self.COLS), size)]]\n self.attach_neighbors()\n self.update_neighbor_values_by_given()\n print(\"Initial board:\")\n GUI.print_sudoku(self.board, self.size)",
"def __init__(self) -> None:\n self.row = 6\n self.col = 7\n self.grid = []\n\n for y in range(self.row):\n temp_row = []\n for x in range(self.col):\n temp_row.append(\" \")\n self.grid.append(temp_row)",
"def reset(self):\n # replace with your code\n self._grid = [[0 for dummy_column in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n for dummy_num in range(2):\n self.new_tile()",
"def initialize_grid(self) -> None:\n for i in range(self.grid_size[0]):\n for j in range(self.grid_size[1]):\n self.set(i, j, self.base_color)",
"def reset(self):\r\n # creating the grid with the values all initialized to zero\r\n \r\n self._grid = [[ 0 for dummy_col in range(self._width)]\r\n for dummy_row in range(self._height)]\r\n # introducing the two initial tiles\r\n self.new_tile()\r\n self.new_tile()\r\n #for testing purposes\r\n #print self.grid\r\n #print self\r",
"def __init__(self, grid_height, grid_width):\n self._grid_height = grid_height\n self._grid_width = grid_width\n self._cells = [[EMPTY for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]",
"def reset(self):\n # self.grid = [[0] * self.grid_width] * self.grid_height\n self.grid = []\n for dummy_row in range(self.grid_height):\n new_row = []\n for dummy_col in range(self.grid_width):\n new_row.append(0)\n self.grid.append(new_row)\n self.new_tile()\n self.new_tile()",
"def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]",
"def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]",
"def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]",
"def setup_new_board(self):\n\n logger.info(u'setup_new_board()')\n\n self.squares = [[None for j in xrange(8)] for i in xrange(8)]\n \n self.black_checkers = [ch.Checker(u'black', self) for i in xrange(12)]\n self.white_checkers = [ch.Checker(u'white', self) for i in xrange(12)]\n\n u\"\"\" Place checkers in starting squares \"\"\"\n i = 0\n for row in xrange(3):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.white_checkers[i])\n i += 1\n\n i = 0\n for row in xrange(5, 8):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.black_checkers[i])\n i += 1",
"def reset(self):\n # replace with your code\n dummy_row = self._grid_height\n dummy_col = self._grid_width\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \n for dummy_row in range(self._grid_height)]\n \n self.new_tile()\n self.new_tile()",
"def __init__(self, puzzle_height, puzzle_width, initial_grid = None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]",
"def reset(self):\r\n # replace with your code\r\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\r\n self.new_tile()\r\n self.new_tile()",
"def reset(self):\n # replace with your code\n self.board = [[0 for dummy_index in range(self.grid_width)] for dummy_inner_index in range(self.grid_height)]",
"def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]",
"def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]",
"def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]",
"def __init__(self, width = 7, height = 7):\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]",
"def reset(self):\r\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \r\n for dummy_row in range(self._grid_height) ]\r\n \r\n \r\n self.new_tile()\r\n self.new_tile()",
"def reset(self):\n # replace with your code\n self._grid = [[0] * self._width for _ in xrange(self._height)]\n self.new_tile()\n self.new_tile()",
"def __init__(self, squares=None, ncols=8, nrows=8):\n self.ncols = ncols\n self.nrows = nrows\n\n if not squares:\n self.squares = dict((i, None) for i in xrange(ncols * nrows))\n\n # 0 begins as the top of the board, making it black\n for i in xrange(ncols * 3):\n row, col = i // ncols, i % ncols\n if row % 2 == col % 2:\n self.squares[i] = Piece(\"black\")\n # red would be the bottom 3 rows\n for i in xrange(ncols * (nrows - 3), ncols * nrows):\n row, col = i // ncols, i % ncols\n if row % 2 == col % 2:\n self.squares[i] = Piece(\"red\")",
"def recreate_grid(self):\n\n self.print_numlist = arcade.SpriteList()\n for row in range(ROW_COUNT):\n for column in range(COLUMN_COUNT):\n sprite = arcade.Sprite(\n f\"Numbers/{self.grid[row][column]}.png\", scale=0.2\n )\n x = (MARGIN + WIDTH) * column + MARGIN + WIDTH // 2\n y = (MARGIN + HEIGHT) * row + MARGIN + HEIGHT // 2\n sprite.center_x = x\n sprite.center_y = y\n self.print_numlist.append(sprite)\n # Check to see if all squares have been filled in\n if 0 not in self.grid:\n # if Cameron.Check_for_Completion(self.grid) == True:\n self.done = True",
"def clean_square(self, row: int, column: int) -> None:\n self.__squares[row][column] = Floor._clean",
"def reset(self):\r\n self.grid = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n self.new_tile()\r\n self.new_tile()",
"def reset(self):\n self._grid = [[0 for dummy_col in range(self._width)]\n for dummy_row in range(self._height)]\n self.new_tile()\n self.new_tile()",
"def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()",
"def set_possible(self):\n for row in range(self.board_size):\n for col in range(self.board_size):\n if self.content[row][col] == \"0\":\n self.possibles[row][col] = set(self.digits)\n poss = set(self.possibles[row][col])\n r_num = self.get_row(row)\n c_num = self.get_column(col)\n s_num = self.get_square_cell((row, col))\n for num in poss:\n if num in r_num or num in c_num or num in s_num:\n self.possibles[row][col].remove(num)\n else:\n self.possibles[row][col] = set()",
"def __init__(self, puzzle):\n # Split the given string input and find the side length and block size of the puzzle\n puz = [int(i) for i in puzzle.split(' ') if i]\n self.sl = int(math.sqrt(len(puz))) \n self.bs = int(math.sqrt(self.sl))\n\n # If side length squared not the same length as total puzzle, or if side lengths\n # not a square length, raise error\n if not (self.sl**2 == len(puz)) or not (self.bs**2 == self.sl):\n raise Sudoku_Errors.InvalidPuzzleException(puzzle, \"Puzzle side lengths not a perfect square\")\n\n # For each value in the puzzle, if not in correct range, raise error\n for ind in range(len(puz)):\n row = ind // self.sl\n col = ind % self.sl\n if not (0 <= puz[ind] <= self.sl):\n raise Sudoku_Errors.InvalidPuzzleException(puzzle,\n \"Puzzle value at ({}, {}) is out of range in puzzle \\n{}\".format(row, col, puzzle))\n\n # Split string by spaces into single list\n self.puzzle = [[j for j in puz[(i*self.sl):(i*self.sl)+self.sl]] for i in range(self.sl)]\n\n # For each value in the puzzle, check that it is a valid value for that square\n for row in range(self.sl):\n for col in range(self.sl):\n # This temporary replacing of each value with 0 is a trick so that\n # the valid_square method can be used on every square\n val = self.puzzle[row][col]\n self.puzzle[row][col] = 0\n\n if not self.valid_square(row, col, val):\n # If not a valid puzzle, reset self.puzzle and raise error\n self.puzzle = None\n raise Sudoku_Errors.InvalidPuzzleException(puzzle,\n \"Puzzle value at ({}, {}) is incorrect in puzzle \\n{}\".format(row, col, puzzle))\n\n # If value is valid, replace that square with prior value that was input\n self.puzzle[row][col] = val"
]
| [
"0.6878316",
"0.68263984",
"0.67410505",
"0.66670257",
"0.665368",
"0.66346514",
"0.6631932",
"0.651728",
"0.65080005",
"0.65080005",
"0.65080005",
"0.65069294",
"0.6500104",
"0.6474858",
"0.645611",
"0.6451403",
"0.64415956",
"0.64415956",
"0.6441014",
"0.6431781",
"0.64129174",
"0.63968796",
"0.6395405",
"0.63829094",
"0.6379664",
"0.6365942",
"0.6354629",
"0.634675",
"0.63463444",
"0.6335321"
]
| 0.7394635 | 0 |
Returns the list of squares | def squares(self) -> list:
return self.__squares | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_squares(self):\n squares_lst = []\n row, col = 0, 0\n while row < self.board_size:\n while col < self.board_size:\n square = self.add_square(row, col)\n squares_lst.append(square)\n col += self.c_size\n row += self.r_size\n col = 0\n return squares_lst",
"def squares():\n return [i for i in xrange(11, 89) if 1 <= (i % 10) <= 8]",
"def find_squares(self):\n\t\tself.squares = [x for x in range(self.low, self.high + 1) if sqrt(x) == int(sqrt(x))]",
"def my_squares(iters):\n out = [i ** 2 for i in range(iters)]\n return out",
"def squares(s):\n\t\"*** YOUR CODE HERE ***\"\n\treturn [int(i**0.5) for i in s if round(i ** 0.5) ** 2 == i ]",
"def my_squares(iters):\n out = []\n for i in range(iters):\n out.append(i ** 2)\n return out",
"def get_square_list(self, squ):\n square = []\n for cell in squ:\n square.append(self.content[cell[0]][cell[1]])\n return square",
"def squares(self):\n ship_length = self.type\n\n squares = [self.star_square]\n for step in range(1, ship_length):\n relative_square = self.get_square_at_relative_position(\n self.star_square, self.orientation, stepped_squares=step)\n squares.append(relative_square)\n return squares",
"def get_squares(x, y, width, height):\n return product(range(x+1, x+width+1), range(y+1, y+height+1))",
"def squares(s):\n\n \"*** YOUR CODE HERE ***\"\n return [int(x**(1/2)) for x in s if x**(1/2) == round(x**(1/2))]",
"def get_squares(n):\n\n return sum([i * i for i in range(n)])",
"def squares(s):\n \"*** YOUR CODE HERE ***\"\n result = []\n for num in s:\n sr = round(math.sqrt(num))\n if sr * sr == num:\n result.append(sr)\n return result",
"def _get_square(self, start_row, start_col):\n end_row = start_row + self.block_size\n end_col = start_col + self.block_size\n\n result = np.array(self.board)[start_row:end_row,\n start_col:end_col]\n return result.tolist()",
"def availableSquares(self):\n List2=[]\n for item in self.all:\n if item.retrieve()==\"\":\n List2.append(item.name())\n return List2",
"def squares(cls) -> FieldArray:\n x = cls.elements\n is_square = x.is_square()\n return x[is_square] # pylint: disable=unsubscriptable-object",
"def get_square(self, index):\n square = []\n for cell in self.squares[index]:\n square.append(self.content[cell[0]][cell[1]])\n return square",
"def make_square(x, size):\n return [ [x, -size/2, size/2],\n\t\t\t [x, size/2, size/2],\n [x, size/2, -size/2],\n\t\t\t [x, -size/2, -size/2]]",
"def sorted_squares(l: list) -> list:\n nl = []\n n = len(l)\n front_ptr, end_ptr = 0, n - 1\n while front_ptr <= end_ptr:\n front = l[front_ptr] ** 2\n end = l[end_ptr] ** 2\n if front > end:\n nl.insert(0, front)\n front_ptr += 1\n else:\n nl.insert(0, end)\n end_ptr -= 1\n\n return nl",
"def square_nums(number_list):",
"def squared(num_list):\n new_list=[]\n for num in num_list:\n sq_num=pow(num,2)\n new_list.append(sq_num)\n return new_list",
"def subsquares(x):\n return subsquares.subsquares(x)",
"def square(i, j):\n return map(sq_start, [i, j, i + 1, j + 1])",
"def playable_squares(self) -> Set[Square]:\n squares = set()\n for col in range(len(self.state[0][0])):\n square = self.playable_square(col)\n if square is not None:\n squares.add(square)\n return squares",
"def square(numbers):\n\n # Needs only one argument\n newlist = []\n for num in numbers:\n newlist.append(num*num)\n return newlist",
"def intList(self):\n\n board = []\n\n for m in self.squares:\n board.append(convertMarker(m))\n\n return board",
"def list_squared(start, stop):\n result = []\n\n for num in range(start, stop):\n divisors = set(chain.from_iterable((\n [i, num/i] for i in range(1, int(math.sqrt(num)) + 1)\n if num % i == 0\n )))\n divisor_squares = [x*x for x in divisors]\n divisor_squares_sum = sum(divisor_squares)\n if math.sqrt(divisor_squares_sum).is_integer():\n result.append([num, divisor_squares_sum])\n\n return result",
"def square(diameter: int, top: int = 0, left: int = 0) -> List['GridQubit']:\n return GridQubit.rect(diameter, diameter, top=top, left=left)",
"def rep_as_squares(N):\n reps = []\n stop = int((N/2)**0.5) + 1 # a must be less than \\sqrt{N/2}\n for a in range(1,stop):\n b_squared = N - a**2\n if is_square(b_squared):\n b = round((b_squared)**(0.5))\n reps.append([a,b])\n return reps",
"def squareGrid(row, col, grid):\n # position of the square box\n marker1, marker2 = (row//3)*3, (col//3)*3\n\n square = [grid[i][(marker2):(marker2+3)] for i in range(marker1,marker1+3)]\n\n # flattening the list\n square = [j for i in square for j in i]\n\n return square",
"def squares(upper=10**5):\n nums = [True] * (upper + 1)\n nums[0] = False\n for i in range(2, (upper + 3) / 2):\n sq = i * i\n for j in range(sq, upper + 1, sq):\n nums[j] = False\n return nums"
]
| [
"0.8269525",
"0.7885707",
"0.78549707",
"0.7667481",
"0.76411295",
"0.7637544",
"0.7627698",
"0.75142777",
"0.74460745",
"0.7321256",
"0.7217469",
"0.7084724",
"0.7050701",
"0.6999946",
"0.6940583",
"0.69208527",
"0.6920305",
"0.68529856",
"0.6828635",
"0.67976826",
"0.67422473",
"0.6737359",
"0.6706026",
"0.670415",
"0.670045",
"0.66617405",
"0.6656131",
"0.6632437",
"0.6602681",
"0.6514877"
]
| 0.83499897 | 0 |
Returns True if the floor of the room is dirty, otherwise False, if the row or column does not exist it throws an exception of type ErrorValue | def is_dirty(self, row: int, column: int) -> int:
return 1 if self.__squares[row][column] == Floor._dirty else 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_valid(self):\n if self.get_row() != -1 and self.get_column() != -1:\n return True\n else:\n return False",
"def _check_occupied(self, col, row):\n if self.board[row - 1][col - 1] == EMPTY:\n return False\n else:\n return True",
"def is_valid_row(self):\r\n return self.valid_row",
"def isdirty(self):\n\n return not not self._olddata",
"def isValid(self):\n for ir in range(self.nRow): # Check rows for duplicates\n row = ir + 1\n vals = {}\n for ic in range(self.nCol):\n col = ic + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"doing row {row} at col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n \n for ic in range(self.nCol): # Check cols for duplicates\n col = ic + 1\n vals = {}\n for ir in range(self.nRow):\n row = ir + 1\n val = self.getCellVal(row=row, col=col)\n if not self.isEmpty(val):\n if val in vals:\n SlTrace.lg(f\"at row={row} doing col={col} val={val} vals={vals} invalid\")\n SlTrace.lg(f\"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}\")\n return False\n vals[val] = val\n return True",
"def isDirty(self):\n\t#@DEBUG christophe have to fix denoising optionnal issue prior to set isDirty() to True\n return False",
"def is_valid_room(self, x, y):\r\n return 0 <= x < self.__nx and 0 <= y < self.__ny",
"def __check_row(self, x: int, y: int) -> bool:\n return not any([self.__maze[x, y + i] for i in (-1, 0, 1)])",
"def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY",
"def _check_writable_(self):\n self._check_within_context_()\n if self._mode != 'w':\n raise Exception('Cannot update database: read only mode')",
"def is_valid(self,row,col) -> bool:\n if(row >=0 and col>=0 and row<self.row and col<self.col and self.array[row][col]==-1 ):\n return True\n return False",
"def is_legal(self):\n if not self._is_valid():\n return False\n\n if not self._check_all(self.get_rows()):\n return False\n\n if not self._check_all(self.get_cols()):\n return False\n\n if not self._check_all(self.get_blocks()):\n return False\n\n return True",
"def valid(self):\r\n if self.file_exists and len(self.missing_columns) == 0 and len(self.veg_columns) > 0 and \\\r\n len(self.lat_errors) == 0 and len(self.lon_errors) == 0 and len(self.time_errors) == 0 and len(self.date_errors) == 0:\r\n return True\r\n else:\r\n return False",
"def _pre_check(self) -> bool:\n if self._fuse_row:\n rows = (\n self._tiling.cells_in_row(self._row_idx),\n self._tiling.cells_in_row(self._row_idx + 1),\n )\n else:\n rows = (\n self._tiling.cells_in_col(self._col_idx),\n self._tiling.cells_in_col(self._col_idx + 1),\n )\n has_a_long_row = any(len(row) > 1 for row in rows)\n if has_a_long_row:\n return False\n first_cell = next(iter(rows[0]))\n second_cell = next(iter(rows[1]))\n cells_are_adjacent = (\n first_cell[0] == second_cell[0] or first_cell[1] == second_cell[1]\n )\n if not cells_are_adjacent:\n return False\n same_basis = (\n self._tiling.cell_basis()[first_cell][0]\n == self._tiling.cell_basis()[second_cell][0]\n )\n if not same_basis:\n return False\n self._first_cell = first_cell\n self._second_cell = second_cell\n return True",
"def dirty(self):\n return not self.consistent",
"def is_valid_position(self, somerow, somecol):\n valid_row = 0 <= somerow <= (self.size-1)\n valid_col = 0 <= somecol <= (self.size-1)\n #need to use self.size - 1 because while we're thinking of an 8x8 chess board, the computer is thinking of a 0x7 chess board\n return valid_row and valid_col",
"def isTileCleaned(self, m, n):\n posx = math.floor(m)\n posy = math.floor(n)\n\n cleanOrDirty = 0 #dirty value\n if (posx,posy) in self.tiles.keys():\n cleanOrDirty = self.tiles[(posx, posy)]\n #print \"pos key found - clean or dirty value = \" + str(cleanOrDirty)\n if (cleanOrDirty == 1):\n return True\n else:\n return False\n else:\n #print \"pos key NOT found!\"\n return False\n\n #raise NotImplementedError",
"def is_board_valid(bd):\n return is_rows_valid(bd) and is_cols_valid(bd) and is_sqrs_valid(bd)",
"def _check_is_editable(self, raise_error: bool = True) -> bool:",
"def is_dirty(self):\n return self.dirty",
"def is_dirty(self):\n return self.dirty",
"def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True",
"def is_complete(self):\n for i in range(9):\n if len(self.rows[i]) != 0 or len(self.columns[i]) != 0 or len(self.groups[i]) != 0:\n return False\n\n for row in self.board:\n for col in row:\n if col == self.empty_cell_flag:\n return False\n\n return True",
"def all_clean(room):\r\n\r\n for row in room:\r\n for cell in row:\r\n if cell == \"dirt\":\r\n return False\r\n\r\n return True",
"def __check_write_success(self, update_data, path, method):\n #Initiate line-counter\n current_line = 1\n #Calculate what line will have the row we are looking for.\n looking_for_line = self.__row_id_in_file(list(update_data)[0])\n row_id = list(update_data)[0]\n #open file at path\n with open(path, 'r') as f:\n for line in f:\n if current_line == looking_for_line:\n if method == 'update':\n if json.loads(line)['row_id'] == row_id and json.loads(line)['data'] == update_data[row_id]:\n return True\n elif method == 'delete':\n if line == '\\n':\n return True\n current_line += 1\n #There was a problem writing, so return false\n return False",
"def is_valid(problem, i, j, e):\n row_map = row_maps[i]\n column_map = column_maps[j]\n sector_map = sector_maps[get_sector_number(i, j)]\n not_in_row = row_map[e-1] == 0\n not_in_column = column_map[e-1] == 0\n not_in_sector = sector_map[e-1] == 0\n\n return not_in_row and not_in_column and not_in_sector",
"def isDirty(*args, connection: bool=True, datablock: bool=True, **kwargs)->bool:\n pass",
"def check(self):\n return self.tile==\"\"",
"def dirty(self):\n return self._orig_line is not None",
"def board_tiles_availability(self):\n for row in range(GameData.rows):\n for col in range(GameData.columns):\n if self.board[row][col] == 0:\n return False\n # Game is draw, no more moves left!\n return True"
]
| [
"0.6817107",
"0.6212588",
"0.6095775",
"0.6038184",
"0.60086036",
"0.58937573",
"0.5883348",
"0.58525366",
"0.5849999",
"0.5797153",
"0.57900304",
"0.5774223",
"0.5740969",
"0.5708654",
"0.57017034",
"0.5701092",
"0.5672823",
"0.56613964",
"0.5657549",
"0.5624911",
"0.5624911",
"0.56159693",
"0.560304",
"0.55907494",
"0.5589071",
"0.5588829",
"0.55797803",
"0.5576768",
"0.55678535",
"0.55552125"
]
| 0.77172345 | 0 |
Cleans the floor of a room, if the row or column does not exist it throws an exception of type ErrorValue | def clean_square(self, row: int, column: int) -> None:
self.__squares[row][column] = Floor._clean | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def updatePositionAndClean(self):\n #X direction (num. rows) wall limit is the width of rectangular room\n #Y direction (num. cols) wall limit is the height of rectangular room\n #So (0,0) is in bottom LEFT corner--since rows start at zero at BOTTOM, not top\n #direction works as you would think, with east at 0 or 360 degrees, 90 degrees at north,\n #180 degrees at west, and 270 degrees at south direction\n\n #so each time unit, getNewPosition in SAME direction if you don't hit the wall\n #if you hit the wall, then get a new RANDOM direction and then recalculate new position,\n #making sure it is a valid position on grid, has not already been cleaned (tile visited)\n\n #So it makes no difference which direction you are moving in--the getNewPosition() function\n #figures out mathematically what the next position is, based on grid, and you just have to\n #determine whether you have hit the wall in that same direction--don't have to look at the\n #number the degrees or radians in that particular direction--just moving in same direction,\n #get next position, do you hit the wall, if so get new random direction, move that way, if you\n #won't hit a wall that way.\n\n #If you don't hit a wall when you calculate a new direction, but the tile is clean already, then\n #just go through the tiles and find one that is not clean yet, and move in the same direction.\n \n robotPos = self.getRobotPosition()\n posx = robotPos.getX()\n posy = robotPos.getY()\n posx = math.floor(posx)\n posy = math.floor(posy)\n #First check if this position is clean:\n #if (self.room.isTileCleaned(posx,posy) == False):\n #then clean this tile!\n #self.room.cleanTileAtPosition(robotPos)\n #Now see where to move robot next on floor and clean that tile if it is not clean\n #So first try moving in same direction--will you hit a wall?\n newPos = self.position.getNewPosition(self.direction,self.speed)\n newPosx = newPos.getX()\n newPosy = newPos.getY()\n newPosx = math.floor(newPosx)\n newPosy = math.floor(newPosy)\n if (self.room.isPositionInRoom(newPos)) and (self.room.isTileCleaned(newPosx,newPosy) == False):\n #position is in room AND the tile has NOT been visited yet--since it's still DIRTY\n #Should NOT have to check whether you hit a wall, since new position is in room\n #so NO NEW DIRECTION needed yet--move in SAME direction\n self.setRobotPosition(newPos)\n self.room.cleanTileAtPosition(newPos)\n #print \"Moved in SAME DIRECTION I was moving in last time, direction = \" + str(self.direction)\n else: # (self.room.isPositionInRoom(newPos) == False) or (self.room.isTileCleaned(newPosx, newPosy) == True):\n # either HIT WALL -- OR -- tile already cleaned -- so calculate new RANDOM direction\n\n #NOTE: this works until you are surrounded by tiles that have no next step tile that has not already been\n #cleaned?\n #?? 
think a problem is that if all surrounding tiles are already clean, then, in that case,\n #you can get stuck in situation where you keep recalculating a new random direction, but when you take a step,\n #all the next tiles have already been cleaned, and you get stuck in a loop, so in this case, you must\n #not recalculate a new direction, but rather keep going in same direction until you find a tile not clean,\n #and jump to that tile instead, and go from there.\n #So find this case--see if that corrects this issue!\n \n keepTryingNewDirection = True\n while (keepTryingNewDirection == True):\n self.direction = random.randrange(0,359) #get new random direction\n newPos = self.position.getNewPosition(self.direction,self.speed) #get new next position step with new direc.\n newPosx = newPos.getX()\n newPosy = newPos.getY()\n newPosx = math.floor(newPosx)\n newPosy = math.floor(newPosy)\n if (self.room.isPositionInRoom(newPos)) and (self.room.isTileCleaned(newPosx,newPosy) == False):\n #new position in new direction is in room, and the tile has not been cleaned yet\n #so new direction and new tile to clean found!\n self.setRobotPosition(newPos)\n self.room.cleanTileAtPosition(newPos)\n #print \"Moved in NEW DIRECTION I was moving in last time, direction = \" + str(self.direction)\n keepTryingNewDirection = False\n elif (self.room.isPositionInRoom(newPos) == False):\n #new position in new direction NOT in room -- try again!\n #print \"new direction found a new position not in room --hit wall--try again! direction = \" + str(self.direction)\n continue\n else:\n #print \"new direction produced new position in room but tile already clean--try again?! direction = \" + str(self.direction)\n #print \"first check to see if all tiles have already been cleaned.\"\n #?? Any other checks needed here? list of tiles visited? is this really needed??\n #calculate list of cells not clean yet\n tilesCleaned = []\n allSurroundingTilesClean = False\n foundTileUnclean = False\n saveWidth = 0\n saveHeight = 0\n for i in range(0,self.room.width):\n for j in range(0,self.room.height):\n if (self.room.isTileCleaned(i,j) == False):\n saveWidth = i\n saveHeight = j\n foundTileUnclean = True\n else:\n #print \"appending to tiles cleaned: tile: i = \" + str(i) + \" j = \" + str(j)\n tilesCleaned.append((i,j)) #make list of tiles cleaned\n if (foundTileUnclean == True):\n #print \"not all tiles are clean!--start here rather than getting new direc. i = \" + str(saveWidth) + \" j = \" + str(saveHeight)\n newPos = Position(saveWidth,saveHeight)\n self.setRobotPosition(newPos)\n self.room.cleanTileAtPosition(newPos)\n #print \"Found new tile that was not clean! current direc. \" + str(self.direction)\n #print \"tile location x = \" + str(saveWidth) + \" y = \" + str(saveHeight)\n keepTryingNewDirection = False\n else:\n keepTryingNewDirection = False\n #print \"all tiles clean! stop cleaning!-- do not look for new direction! should be done.\"\n\n #for tile in tilesCleaned:\n #print tile",
"def cleanTileAtPosition(self, pos):\n #convert pos to grid reference.\n #check if grid reference is in tilesCleaned\n self.x = math.floor(pos.getX())\n self.y = math.floor(pos.getY())\n if (self.x, self.y) not in self.tilesCleaned:\n self.tilesCleaned.append((self.x, self.y))\n #self.room[pos.getX()][pos.getY()]==0 #set position to clean (array element = 0)\n #this method does not return anything.\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html",
"def check_and_clear_rows(self):\n # if board is full, then there will be a '#' in the first row\n if '#' in self.board[0]:\n return 'Game Over! Top has been reached.'\n for row in xrange(self.height):\n # if any given row is full, then that row won't have any blank spaces\n if not ' ' in self.board[row]:\n del self.board[row]\n self.board.insert(0, [' '] * self.width)",
"def all_clean(room):\r\n\r\n for row in room:\r\n for cell in row:\r\n if cell == \"dirt\":\r\n return False\r\n\r\n return True",
"def fake_clean_row(row):\n\treturn row",
"def uncover_blanks(self, row, col):\n checked = {}\n to_be_checked = []\n to_be_checked.append((row, col))\n while len(to_be_checked) > 0:\n sq_row, sq_col = to_be_checked.pop()\n if checked.has_key((sq_row, sq_col)):\n continue\n checked[(sq_row, sq_col)] = True\n if not self.valid_square(sq_row, sq_col):\n continue\n if self.array[sq_row][sq_col].visible is True:\n continue\n square = self.array[sq_row][sq_col]\n square.visible = True\n self.squares_left -= 1\n if square.type == SquareType.BLANK:\n start_row = sq_row-1\n start_col = sq_col-1\n end_row = sq_row+1\n end_col = sq_col+1\n for i in range(start_row, end_row+1):\n for j in range(start_col, end_col+1):\n if not checked.has_key((i, j)):\n to_be_checked.append((i, j))",
"def repairWall(self, game_state):\n first_row = [[0, 13], [1, 13],[2, 13],[3, 13],[4, 13],[5, 13],[6, 13],[7, 13],[8, 13],[9, 13],[10, 13],[11, 13],[12, 13],[13, 13],[15, 13],[16, 13],[17, 13],[18, 13],[19, 13],[20, 13],[21, 13],[22, 13],[23, 13],[24, 13],[25, 13],[26, 13],[27, 13]]\n destructor_loc1 = [[12,11], [16,11]]\n second_row = [[13, 12],[15, 12],[12, 12],[16, 12],[11, 12],[17, 12],[1, 12],[2, 12],[3, 12],[4, 12],[5, 12],[6, 12],[7, 12],[8, 12],[9, 12],[10, 12],[18, 12],[19, 12],[20, 12],[21, 12],[22, 12],[23, 12],[24, 12],[25, 12],[26, 12]]\n destructor_loc2 = [[8,11], [20,11]]\n encryptor_loc1 = [[13,11], [15,11]]\n destructor_loc3 = [[4,11], [24,11]]\n encryptor_row1 = [[13,10], [15,10]]\n destructor_row1 = [[12,10], [16,10]]\n encryptor_row2 = [[13,9], [15,9]]\n destructor_row2 = [[12,9], [16,9]]\n encryptor_row3 = [[13,8], [15,8]]\n destructor_row3 = [[12,8], [16,8]]\n\n for location in first_row:\n if game_state.can_spawn(FILTER, location):\n game_state.attempt_spawn(FILTER, location)\n\n for location in destructor_loc1:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in second_row:\n if game_state.can_spawn(FILTER, location):\n game_state.attempt_spawn(FILTER, location)\n\n for location in destructor_loc2:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_loc1:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_loc3:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row1:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row1:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row2:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row2:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row3:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row3:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)",
"def dirty_squares(self) -> None:\n row = ran(0, self.__squares.__len__() - 1)\n column = ran(0, self.__squares[0].__len__() - 1)\n self.__squares[row][column] = Floor._dirty\n print(\"Ensuciamos el piso y quedo así: \", self.__str__())",
"def _open(self, row: int, col: int) -> None:\n\n this_space = self._lookup[(row, col)]\n\n # remove this space from unknowns\n if this_space.position in self._unknowns:\n self._unknowns.pop(this_space.position)\n\n # open this space\n n_hinted = open(row, col)\n this_space.hint = str(n_hinted)\n n_marked = sum(\n 1 for neighbor in this_space.neighbors.values() if neighbor and self._lookup[neighbor].hint == 'x')\n this_space.num_undiscovered = n_hinted - n_marked\n\n # open safe neighbors\n if this_space.num_undiscovered == 0:\n safe_by_proxy = {neighbor for neighbor in this_space.neighbors.values() if\n neighbor and self._lookup[neighbor].hint == '?'}\n for pos in safe_by_proxy:\n self._open(*pos)\n\n # remove this space from any zones it was in.\n for tie in this_space.ties:\n for zone in list(self._lookup[tie].zones):\n if this_space.position in zone:\n new_zone = zone - {this_space.position}\n freq = self._lookup[tie].zones.pop(zone)\n if new_zone:\n self._lookup[tie].zones[new_zone] = freq",
"def cleanTileAtPosition(self, pos):\n #Return the floor of x as a float, the largest integer value less than\n #or equal to x\n posx = pos.getX()\n posy = pos.getY()\n posx = math.floor(posx)\n posy = math.floor(posy)\n self.tiles[(posx, posy)] = 1 # using 0 as dirty value, 1 as clean value, of key tuple pos(x,y)\n #self.printTiles()\n #raise NotImplementedError",
"def clear_cell(self, row=None, col=None):\n if row is not None:\n if row < 0 or row >= self.rows:\n raise ValueError('%d is not a valid row' % row)\n if col < 0 or col >= self.columns:\n raise ValueError('%d is not a valid column' % col)\n cellid = self._get_cell_id(row, col)\n else:\n cellid = None\n self._clear_component(cellid)",
"def clean(self):\n for i in range(len(self.asteroid_type) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_type[i])\n if x < -self.gap:\n self.del_asteroid(i)",
"def empty_room():\n\troom_material = pra.Material(energy_absorption=0.6, scattering=None)\n\troom_faces = make_polygon(\n\t\tcentre=[0,0,2.5],\n\t\tradius=10,\n\t\theight=5,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4]\n\t)\n\n\t# create room\n\twalls = []\n\twalls.extend(create_walls(room_faces, room_material))\n\n\troom = pra.Room(walls, fs=fs, max_order=3, ray_tracing=True, air_absorption=False)\n\n\troom.add_source([0, 0, 2.])\n\troom.add_microphone([0, 0.2, 2.1])\n\n\t# compute rir\n\troom.image_source_model()\n\troom.ray_tracing()\n\troom.compute_rir()\n\n\treturn room",
"def swap_states(matrix, floor, xaxis, rooms):\n for room in rooms:\n try:\n column = room\n row = 1\n while column > xaxis:\n column -= xaxis\n row += 1\n\n row -=1\n column -=1\n\n if column == -1:\n # catches a case when the user enters 0 as a room number.\n # would originally toggle the last element on the first row.\n continue\n\n if \"Y\" in matrix[floor][row][column]:\n matrix[floor][row][column] = matrix[floor][row][column].replace(\"Y\", \"\")\n else:\n matrix[floor][row][column] = \"Y\" + matrix[floor][row][column]\n # Row and column now contain the coordinates of the room.\n except IndexError:\n print()\n print(room,\"isn't one of the room numbers.\")\n print(\"Press enter keep working.\")\n input()\n return matrix",
"def _clear_rows(self) -> None:\n df = self.hotels_df.dropna()\n\n # Delete rows with non-float values in coordinates\n df = df[df[\"Latitude\"].apply(self.is_float)]\n df = df[df[\"Longitude\"].apply(self.is_float)]\n\n # Convert to Float\n df[\"Latitude\"] = df[\"Latitude\"].astype(float)\n df[\"Longitude\"] = df[\"Longitude\"].astype(float)\n\n # Delete rows with wrong values in coordinates\n df = df[df[\"Latitude\"].apply(lambda x: abs(x) <= 90)]\n df = df[df[\"Longitude\"].apply(lambda x: abs(x) <= 180)]\n\n self.hotels_df = df",
"def test_set_cell_with_too_large_row(self):\n self.assertRaises(ValueError, self.sudoku.set_cell, (9, 0), 0)",
"def dirty_square(self, row: int, column: int) -> None:\n self.__squares[row][column] = Floor._dirty",
"def delete_value(loc):\r\n (application.ui.__getattribute__(f'cell{loc.column+1}{loc.row+1}')).setText(\"\")\r\n sudoku_grid[loc.row, loc.column] = 0\r\n global cnt_free_cells\r\n cnt_free_cells += 1",
"def fill_blanks_randomly(grid):\n for row in grid:\n for i in range(len(row)):\n if row[i] is None:\n row[i] = get_random_char()",
"def test_buildings_rows(self):\n processed_buildings_output = buildings_clean(\n \"seattlecollision/data/raw_data/raw_buildings_input.csv\")\n self.assertTrue(processed_buildings_output.shape[0] >= 10)",
"def update_board(self, value, row=-1, col=-1, cell=-1):\n\n if row != -1 and col != -1 and cell == -1:\n _row,_col = row,col\n\n elif row == -1 and col == -1 and type(cell) == tuple:\n _row,_col = cell\n\n else:\n raise Exception(\"you must provide either row and column or a cell tuple\")\n\n group = self.calc_group(_row, _col)\n\n self.rows[_row].discard(value)\n self.columns[_col].discard(value)\n self.groups[group].discard(value)\n\n self.board[_row][_col] = value",
"def set_empty(self, row, col):\n self._cells[row][col] = EMPTY",
"def check_orient (tuple_top_L, tuple_bottom_R):\n\n #get the plate columns from the plate class\n columns = plate1536.columns\n\n #get the plate rows from the plate class\n rows = plate1536.rows\n\n #check if bottom_R is actually below top_L\n if rows.index(tuple_top_L[0]) > rows.index(tuple_bottom_R[0]):\n raise ValueError('Your bottom right well is ABOVE your top left well')\n\n #check if bottom_R is actually to the right of top_L\n if columns.index(int(tuple_top_L[1])) > columns.index(int(tuple_bottom_R[1])):\n raise ValueError('Your bottom right well is LEFT of your top left well')\n\n return None",
"def clear_rows(self):\n ### Previous version had a bug, in that it assumed the set of ###\n ### indices of full rows had to be a contiguous sequence! ###\n full_rows = [j for j in range(ROWS) if all(\n (i, j) in self.locked_squares for i in range(COLS))]\n if not full_rows: return\n ### Calculate how for to drop each other row, and do it ###\n drop = {j: len([k for k in full_rows if k > j]) for j in range(ROWS)}\n self.locked_squares = {(i, j+drop[j]): color for (i, j), color in\n self.locked_squares.items() if j not in full_rows}\n ### Now just update score, etc. ###\n d = len(full_rows)\n self.increment_lines(d)\n self.increment_score(self.level*{1: 40, 2: 100, 3: 300, 4: 1200}[d])\n if self.level < self.lines // 10 + 1:\n self.increment_level()",
"def create_room(w, h):\n # map[0] gives a list which represents the row, or the x. Then map[x][y]\n # gives the yth place in the column x.\n room = [[' ' for j in range(h)] for i in range(w)]\n for x in range(w):\n for y in range(h):\n if y == 0 or y == (h-1):\n room[x][y] = '-'\n elif x == 0 or x == (w-1):\n room[x][y] = '|'\n return room",
"def clear_cell(self, x, y):\n r = self.rect_area(x, y)\n background = pygame.Surface((75, 75)) # creates a white surface\n background.fill((255, 255, 255))\n self.screen.blit(background, (x * 80 + 3, 80 + y * 80 + 3)) # draw\n pygame.display.update(r) # update screen to showcase changes",
"def test_set_cell_with_negative_row(self):\n self.assertRaises(ValueError, self.sudoku.set_cell, (-1, 0), 0)",
"def test_board_not_full(self):\n\n self.controller.model.board[0][0] = '-'\n actual = self.controller.check_tie()\n self.assertFalse(actual)",
"def __init__(self, columns, rows, floor_char, wall_char):\n\t\tsuper().__init__(columns, rows, None)\n\t\tself.floor_char = floor_char\n\t\tself.wall_char = wall_char\n\t\tself.create()",
"def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 
'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move"
]
| [
"0.5899042",
"0.5685498",
"0.5672632",
"0.5614197",
"0.55412954",
"0.5491456",
"0.5426389",
"0.53508824",
"0.5341952",
"0.53339857",
"0.5284222",
"0.52741313",
"0.52462304",
"0.5235767",
"0.5235078",
"0.5192523",
"0.51497644",
"0.51288223",
"0.512652",
"0.51212054",
"0.5111696",
"0.51029044",
"0.50835735",
"0.507526",
"0.5065421",
"0.5028279",
"0.5016935",
"0.5015991",
"0.49992585",
"0.49912697"
]
| 0.6362954 | 0 |
Converts the NodeXMLFormatter object to an instance of the Node model. | def to_model(self):
node = Node.objects.get_or_create(
name=self.name,
description=self.description
)[0]
return node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_node(cls, obj):\n if isinstance(obj, cls):\n return obj\n elif is_string(obj):\n # Assume filepath.\n return FileNode(obj)\n elif obj is None:\n return obj\n else:\n raise TypeError(\"Don't know how to convert %s to Node instance.\" % obj)",
"def from_xml_node(cls, xml_node):\n object_id = get_xml_int_value(xml_node, xml_tags.Elements.ID)\n name = get_xml_text_value(xml_node, xml_tags.Elements.NAME)\n is_global = get_xml_text_value(xml_node, xml_tags.Elements.GLOBAL)\n object_type = get_xml_text_value(xml_node, xml_tags.Elements.TYPE)\n display_name = get_xml_text_value(xml_node, xml_tags.Elements.DISPLAY_NAME)\n ip = get_xml_text_value(xml_node, xml_tags.Elements.IP)\n device_id = get_xml_int_value(xml_node, xml_tags.Elements.DEVICE_ID)\n comment = get_xml_text_value(xml_node, xml_tags.Elements.COMMENT)\n implicit = get_xml_text_value(xml_node, xml_tags.Elements.IMPLICIT)\n uid = get_xml_text_value(xml_node, xml_tags.Elements.UID)\n class_name = get_xml_text_value(xml_node, xml_tags.Elements.CLASS_NAME)\n management_domain = get_xml_text_value(xml_node, xml_tags.Elements.MANAGEMENT_DOMAIN)\n\n nat_info = None\n nat_info_node = get_xml_node(xml_node, xml_tags.Elements.NAT_INFO, optional=True)\n if nat_info_node is not None:\n nat_info_type = nat_info_node.attrib[xml_tags.Attributes.XSI_NAMESPACE_TYPE]\n if nat_info_type == xml_tags.Attributes.FORTIGATE_NAT_INFO:\n nat_info = FortigateNatInfo.from_xml_node(nat_info_node)\n\n return cls(display_name, is_global, object_id, name, object_type, ip, device_id, comment, implicit, uid,\n class_name=class_name, management_domain=management_domain, nat_info=nat_info)",
"def toXMLNode(self):\n return _libsbml.SBase_toXMLNode(self)",
"def convertNode(self, builder, typeName, data):\n\t\tif typeName not in self.nodeTypeMap:\n\t\t\traise Exception('Node type \"' + typeName + '\" hasn\\'t been registered.')\n\n\t\tconvertedData = self.nodeTypeMap[typeName](self, data)\n\n\t\ttypeNameOffset = builder.CreateString(typeName)\n\t\tdataOffset = builder.CreateByteVector(convertedData)\n\n\t\tObjectData.Start(builder)\n\t\tObjectData.AddType(builder, typeNameOffset)\n\t\tObjectData.AddData(builder, dataOffset)\n\t\treturn ObjectData.End(builder)",
"def from_xml_node(cls, xml_node):\n raise NotImplementedError(\"from_xml_node must be implemented by derived classes.\")",
"def _to_node(self, data):\n return Node(\n id = data['ps'],\n name = data['ps'],\n state = NodeState.UNKNOWN,\n public_ip = [data['ip']],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'current_size' : data['memory_mb'],\n 'account_id' : data['account_id'],\n 'type' : data['type']\n }\n )",
"def getNode(self):\n node = Element.getNode(self)\n node.tag = 'node'\n node.attrib['entity'] = self.entity.id\n return(node)",
"def tree(self) -> Node:\n return Node(self.to_string())",
"def from_xml_node(cls, xml_node):\n object_id = get_xml_int_value(xml_node, xml_tags.Elements.ID)\n name = get_xml_text_value(xml_node, xml_tags.Elements.NAME)\n is_global = get_xml_text_value(xml_node, xml_tags.Elements.GLOBAL)\n object_type = get_xml_text_value(xml_node, xml_tags.Elements.TYPE)\n display_name = get_xml_text_value(xml_node, xml_tags.Elements.DISPLAY_NAME)\n netmask = get_xml_text_value(xml_node, xml_tags.Elements.NETMASK)\n ip = get_xml_text_value(xml_node, xml_tags.Elements.IP)\n device_id = get_xml_int_value(xml_node, xml_tags.Elements.DEVICE_ID)\n comment = get_xml_text_value(xml_node, xml_tags.Elements.COMMENT)\n implicit = get_xml_text_value(xml_node, xml_tags.Elements.IMPLICIT)\n uid = get_xml_text_value(xml_node, xml_tags.Elements.UID)\n return cls(display_name, is_global, object_id, name, object_type, ip, netmask, device_id, comment, implicit, uid)",
"def xpathCastNodeToNumber(self):\n ret = libxml2mod.xmlXPathCastNodeToNumber(self._o)\n return ret",
"def create_node_instance(self, node_type=None):\n if node_type in self.aliases:\n node_type = self.aliases[node_type]\n\n _NodeClass = self.__nodes.get(node_type)\n if _NodeClass:\n return _NodeClass()",
"def from_xml_node(cls, xml_node):\n object_id = get_xml_int_value(xml_node, xml_tags.Elements.ID)\n name = get_xml_text_value(xml_node, xml_tags.Elements.NAME)\n is_global = get_xml_text_value(xml_node, xml_tags.Elements.GLOBAL)\n object_type = get_xml_text_value(xml_node, xml_tags.Elements.TYPE)\n display_name = get_xml_text_value(xml_node, xml_tags.Elements.DISPLAY_NAME)\n ip = get_xml_text_value(xml_node, xml_tags.Elements.IP)\n device_id = get_xml_int_value(xml_node, xml_tags.Elements.DEVICE_ID)\n comment = get_xml_text_value(xml_node, xml_tags.Elements.COMMENT)\n implicit = get_xml_text_value(xml_node, xml_tags.Elements.IMPLICIT)\n class_name = get_xml_text_value(xml_node, xml_tags.Elements.CLASS_NAME)\n uid = get_xml_text_value(xml_node, xml_tags.Elements.UID)\n return cls(display_name, is_global, object_id, name, object_type, ip, device_id, comment, implicit, class_name,\n uid)",
"def convert(cls, node_entry, model_container, node_dict):\n name = node_entry[\"name\"]\n shape = numpy.asarray(\n [a.value for a in node_entry[\"relay_node\"].attrs.newshape], dtype=numpy.int64\n )\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(shape, name, \"shape\", model_container),\n ]\n\n node = onnx.helper.make_node(cls.__name__, input_names, node_entry[\"output_names\"])\n model_container.add_nodes([node])",
"def from_xml_node(cls, xml_node):\n name = get_xml_text_value(xml_node, xml_tags.Elements.NAME)\n object_id = get_xml_int_value(xml_node, xml_tags.Elements.ID)\n display_name = get_xml_text_value(xml_node, xml_tags.Elements.DISPLAY_NAME)\n return cls(name, display_name, object_id)",
"def parseNode( self, node ):\n\n if not node:\n return\n\n if ( not node.IsA( 'vtkMRMLModelNode' ) and not node.IsA( 'vtkMRMLModelHierarchyNode' ) ) or ( node.IsA( 'vtkMRMLModelNode' ) and node.GetHideFromEditors() ):\n return\n\n if self.__nodes.has_key( node.GetID() ):\n return\n\n parent_node = \"scene\"\n\n parentNode = None\n hNode = None\n\n if node.IsA( 'vtkMRMLModelNode' ):\n parentNode = slicer.app.applicationLogic().GetModelHierarchyLogic().GetModelHierarchyNode( node.GetID() )\n\n if parentNode:\n parentNode = parentNode.GetParentNode()\n\n elif node.IsA( 'vtkMRMLModelHierarchyNode' ):\n parentNode = node.GetParentNode()\n\n if parentNode:\n if parentNode.GetID() == node.GetID():\n return\n\n parent_node = parentNode.GetID()\n self.parseNode( parentNode )\n\n if not node.IsA( 'vtkMRMLModelHierarchyNode' ) or not node.GetModelNode():\n\n self.__nodes[node.GetID()] = node.GetName()\n self.__tree.create_node( node.GetName(), node.GetID(), parent=parent_node )",
"def from_xml_node(cls, xml_node):\n object_id = get_xml_int_value(xml_node, xml_tags.Elements.ID)\n name = get_xml_text_value(xml_node, xml_tags.Elements.NAME)\n is_global = get_xml_text_value(xml_node, xml_tags.Elements.GLOBAL)\n object_type = get_xml_text_value(xml_node, xml_tags.Elements.TYPE)\n display_name = get_xml_text_value(xml_node, xml_tags.Elements.DISPLAY_NAME)\n first_ip = get_xml_text_value(xml_node, xml_tags.Elements.FIRST_IP)\n last_ip = get_xml_text_value(xml_node, xml_tags.Elements.LAST_IP)\n device_id = get_xml_int_value(xml_node, xml_tags.Elements.DEVICE_ID)\n comment = get_xml_text_value(xml_node, xml_tags.Elements.COMMENT)\n implicit = get_xml_text_value(xml_node, xml_tags.Elements.IMPLICIT)\n uid = get_xml_text_value(xml_node, xml_tags.Elements.UID)\n return cls(display_name, is_global, object_id, name, object_type, first_ip, last_ip, device_id, comment,\n implicit, uid)",
"def from_xml_node(cls, xml_node):\n num_id = get_xml_int_value(xml_node, Elements.ID)\n name = get_xml_text_value(xml_node, Elements.NAME)\n read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)\n\n network_objects = []\n for network_object_node in xml_node.iter(tag=Elements.NETWORK_OBJECT):\n network_object_type = network_object_node.attrib[TYPE_ATTRIB]\n if network_object_type == TYPE_DNS:\n network_object = Network_Object_DNS_Host.from_xml_node(network_object_node)\n elif network_object_type == TYPE_IP:\n network_object = Network_Object_IP_Address.from_xml_node(network_object_node)\n elif network_object_type == TYPE_ANY:\n network_object = Network_Object_Any.from_xml_node(network_object_node)\n elif network_object_type == TYPE_OBJECT:\n network_object = Network_Object_Object.from_xml_node(network_object_node)\n else:\n raise ValueError(\"Unknown network object type {}.\".format(network_object_type))\n network_objects.append(network_object)\n\n return cls(num_id, name, network_objects, read_only)",
"def from_xml_node(cls, xml_node):\n object_id = get_xml_int_value(xml_node, xml_tags.Elements.ID)\n name = get_xml_text_value(xml_node, xml_tags.Elements.NAME)\n is_global = get_xml_text_value(xml_node, xml_tags.Elements.GLOBAL)\n object_type = get_xml_text_value(xml_node, xml_tags.Elements.TYPE)\n display_name = get_xml_text_value(xml_node, xml_tags.Elements.DISPLAY_NAME)\n ip = get_xml_text_value(xml_node, xml_tags.Elements.IP)\n class_name = get_xml_text_value(xml_node, xml_tags.Elements.CLASS_NAME)\n interfaces = XML_List.from_xml_node_by_tags(xml_node, xml_tags.Elements.INTERFACES, xml_tags.Elements.INTERFACE,\n Host_Interface, optional=True)\n device_id = get_xml_int_value(xml_node, xml_tags.Elements.DEVICE_ID)\n comment = get_xml_text_value(xml_node, xml_tags.Elements.COMMENT)\n implicit = get_xml_text_value(xml_node, xml_tags.Elements.IMPLICIT)\n uid = get_xml_text_value(xml_node, xml_tags.Elements.UID)\n return cls(display_name, is_global, object_id, name, object_type, ip, class_name, interfaces, device_id,\n comment, implicit, uid)",
"def from_xml_node(cls, xml_node):\n num_id = get_xml_int_value(xml_node, Elements.ID)\n ip_address = get_xml_text_value(xml_node, Elements.IP_ADDRESS)\n netmask = get_xml_text_value(xml_node, Elements.NETMASK)\n return cls(num_id, ip_address, netmask)",
"def convert(cls, node_entry, model_container, node_dict):\n attrs = cls.convert_attributes(node_entry[\"relay_node\"].attrs)\n\n name = node_entry[\"name\"]\n pad_data = numpy.asarray(attrs[\"pads\"], dtype=attrs[\"pads\"][0].dtype).astype(numpy.int64)\n\n input_names = [\n node_entry[\"input_names\"][0],\n add_input(pad_data, name, \"pads\", model_container),\n node_entry[\"input_names\"][1],\n ]\n\n node = onnx.helper.make_node(\n cls.__name__, input_names, node_entry[\"output_names\"], mode=attrs[\"mode\"]\n )\n model_container.add_nodes([node])",
"def from_xml_node(cls, xml_node):\n name = get_xml_text_value(xml_node, xml_tags.Elements.NAME)\n object_id = get_xml_int_value(xml_node, xml_tags.Elements.ID)\n display_name = get_xml_text_value(xml_node, xml_tags.Elements.DISPLAY_NAME)\n uid = get_xml_text_value(xml_node, xml_tags.Elements.UID)\n implicit = get_xml_text_value(xml_node, xml_tags.Elements.IMPLICIT)\n return cls(name, display_name, object_id, uid, implicit)",
"def from_xml_node(cls, xml_node):\n name = get_xml_text_value(xml_node, xml_tags.Elements.NAME)\n object_id = get_xml_int_value(xml_node, xml_tags.Elements.ID)\n display_name = get_xml_text_value(xml_node, xml_tags.Elements.DISPLAY_NAME)\n uid = get_xml_text_value(xml_node, xml_tags.Elements.UID)\n implicit = get_xml_text_value(xml_node, xml_tags.Elements.IMPLICIT)\n return cls(name, display_name, object_id, uid, implicit)",
"def from_xml_node(cls, xml_node):\n name = get_xml_text_value(xml_node, xml_tags.Elements.NAME)\n object_id = get_xml_int_value(xml_node, xml_tags.Elements.ID)\n display_name = get_xml_text_value(xml_node, xml_tags.Elements.DISPLAY_NAME)\n uid = get_xml_text_value(xml_node, xml_tags.Elements.UID)\n implicit = get_xml_text_value(xml_node, xml_tags.Elements.IMPLICIT)\n return cls(name, display_name, object_id, uid, implicit)",
"def node(self):\n return Node(self)",
"def from_xml_node(cls, xml_node):\n num_id = get_xml_int_value(xml_node, Elements.ID)\n object_name = get_xml_text_value(xml_node, Elements.OBJECT_NAME)\n object_type = get_xml_text_value(xml_node, Elements.OBJECT_TYPE)\n object_details = get_xml_text_value(xml_node, Elements.OBJECT_DETAILS)\n management_name = get_xml_text_value(xml_node, Elements.MANAGEMENT_NAME)\n management_id = get_xml_int_value(xml_node, Elements.MANAGEMENT_ID)\n object_UID = get_xml_text_value(xml_node, Elements.OBJECT_UID)\n return cls(num_id, object_name, object_type, object_details, management_name, management_id, object_UID)",
"def from_xml_node(cls, xml_node):\n num_id = get_xml_int_value(xml_node, Elements.ID)\n object_name = get_xml_text_value(xml_node, Elements.OBJECT_NAME)\n object_type = get_xml_text_value(xml_node, Elements.OBJECT_TYPE)\n object_details = get_xml_text_value(xml_node, Elements.OBJECT_DETAILS)\n management_name = get_xml_text_value(xml_node, Elements.MANAGEMENT_NAME)\n management_id = get_xml_int_value(xml_node, Elements.MANAGEMENT_ID)\n object_UID = get_xml_text_value(xml_node, Elements.OBJECT_UID)\n return cls(num_id, object_name, object_type, object_details, management_name, management_id, object_UID)",
"def convertNode(cls, node):\n if isinstance(node, cls):\n if len(node) == 1:\n return cls.NodeProxy(node[0])\n return node\n elif isinstance(node, list):\n if len(node) > 1:\n return cls(node)\n else:\n return cls.NodeProxy(node[0])\n else:\n return cls.NodeProxy(node)",
"def get_node(self, node_id) -> Node:\n return self._node_serializer.from_data(graph=self, **self._collection.get_record(node_id))",
"def convert(self, node):\n # get the conversion lut\n node_type = self.get_node_type(node)\n conversion_specs = self.conversion_spec_sheet.get(node_type)\n if not conversion_specs:\n print('No conversion_specs for: %s' % node_type)\n return\n\n # call any call_before\n call_before = conversion_specs.get('call_before')\n if call_before and callable(call_before):\n call_before(node)\n\n # some conversion specs doesn't require a new node to be created\n # so return early if this is the case\n if 'node_type' not in conversion_specs:\n return node\n\n node_creator = self.node_creator_factory(conversion_specs)\n rs_node = node_creator.create()\n\n # rename the material to have a similar name with the original\n if rs_node is not None:\n node_type_name = conversion_specs['node_type'] \\\n if isinstance(conversion_specs['node_type'], str) else \\\n conversion_specs['secondary_type'].replace(' ', '_')\n\n self.rename_node(\n rs_node,\n self.get_node_name(node).replace(\n node_type, node_type_name\n )\n )\n else:\n rs_node = node\n\n # set attributes\n attributes = conversion_specs.get('attributes')\n if attributes:\n for source_attr, target_attr in attributes.items():\n # value can be a string\n if isinstance(target_attr, basestring):\n # check incoming connections\n incoming_connections = \\\n self.get_node_inputs(node, source_attr)\n if incoming_connections:\n # connect any textures to the target node\n for input_ in incoming_connections:\n # input_ >> rs_node.attr(target_attr)\n self.connect_attr(\n input_,\n rs_node,\n target_attr\n )\n else:\n # just read and set the value directly\n self.set_attr(\n rs_node,\n target_attr,\n self.get_attr(node, source_attr)\n )\n\n elif isinstance(target_attr, list):\n # or a list\n # where we set multiple attributes in the rs_node to the\n # same value\n # source_attr_value = node.getAttr(source_attr)\n source_attr_value = self.get_attr(node, source_attr)\n for attr in target_attr:\n self.set_attr(rs_node, attr, source_attr_value)\n # for input_ in node.attr(source_attr).inputs(p=1):\n for input_ in self.get_node_inputs(node, source_attr):\n self.connect_attr(input_, rs_node, attr)\n elif isinstance(target_attr, dict):\n # or another dictionary\n # where we have a converter\n source_attr_value = self.get_attr(node, source_attr)\n for attr, converter in target_attr.items():\n if callable(converter):\n try:\n attr_value = converter(source_attr_value)\n except TypeError:\n # it should use two parameters, also include\n # the node itself\n try:\n attr_value = converter(\n source_attr_value,\n node\n )\n except TypeError:\n # so this is the third form that also\n # includes the rs node\n attr_value = converter(\n source_attr_value,\n node,\n rs_node\n )\n else:\n attr_value = converter\n self.set_attr(rs_node, attr, attr_value)\n\n # call any call_after\n call_after = conversion_specs.get('call_after')\n if call_after and callable(call_after):\n call_after(node, rs_node)\n\n return rs_node",
"def from_xml_node(cls, xml_node):\n ip = get_xml_text_value(xml_node, xml_tags.Elements.IP)\n netmask = get_xml_text_value(xml_node, xml_tags.Elements.NETMASK)\n precedence = get_xml_text_value(xml_node, xml_tags.Elements.PRECEDENCE)\n visibility = get_xml_text_value(xml_node, xml_tags.Elements.VISIBILITY)\n return cls(ip, netmask, precedence, visibility)"
]
| [
"0.6461898",
"0.59501135",
"0.5938948",
"0.5907918",
"0.581737",
"0.57266885",
"0.57264614",
"0.5676728",
"0.5670979",
"0.5602469",
"0.55902135",
"0.55887836",
"0.5583109",
"0.55772483",
"0.5562262",
"0.5556643",
"0.55405396",
"0.5535847",
"0.5533974",
"0.55142635",
"0.5497968",
"0.5497968",
"0.5497968",
"0.549423",
"0.54658616",
"0.54658616",
"0.5456607",
"0.5390013",
"0.53676707",
"0.53490824"
]
| 0.6676547 | 0 |
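The to_model() shown in this record returns Node.objects.get_or_create(...)[0], i.e. only the model instance, discarding the created flag. A minimal stand-in sketch of that contract (pure Python, no Django required; every name below is hypothetical):

_registry = {}

def get_or_create(name, description):
    # Mimics the (instance, created) pair returned by Django's get_or_create.
    key = (name, description)
    created = key not in _registry
    if created:
        _registry[key] = {"name": name, "description": description}
    return _registry[key], created

node, created = get_or_create("router-1", "edge router")
same, created_again = get_or_create("router-1", "edge router")
assert node is same and created and not created_again  # repeated calls reuse the same record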
Symmetrically pad image with constant ``pad_value`` to obtain square image. | def pad_to_square(img_t: torch.Tensor, pad_value: float = 0
) -> Tuple[torch.Tensor, Tuple[int, int, int, int]]:
if len(img_t.shape) == 3:
_, height, width = img_t.shape
elif len(img_t.shape) == 2:
height, width = img_t.shape
else:
raise ValueError("Wrong image shape ({}); expected 2 or 3 dimensions"
.format(img_t.shape))
dim_diff: int = np.abs(height - width)
# (upper / left) padding and (lower / right) padding
pad1: int = dim_diff // 2
pad2: int = dim_diff - dim_diff // 2
# padding put together:
pad: Tuple[int, int, int, int] = (0, 0, pad1, pad2) \
if height <= width else (pad1, pad2, 0, 0)
# Add padding to image
img_t = torch.nn.functional.pad(img_t, list(pad), value=pad_value)
return img_t, pad | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pad_to_square(image, min_size, **pad_kwargs):\n\n h, w = image.shape[:2]\n\n if h >= min_size and w >= min_size:\n return image\n\n top = bottom = left = right = 0\n\n if h < min_size:\n top = (min_size - h) // 2\n bottom = min_size - h - top\n if w < min_size:\n left = (min_size - w) // 2\n right = min_size - w - left\n\n return np.pad(image,\n ((top, bottom),\n (left, right),\n (0, 0)), **pad_kwargs)",
"def pad_to_square(a):\n if a.shape[1]>a.shape[0]: # pad height\n n_to_add = a.shape[1]-a.shape[0]\n top_pad = n_to_add//2\n bottom_pad = n_to_add-top_pad\n a = np.pad(a, [(top_pad, bottom_pad), (0, 0), (0, 0)], mode='constant')\n\n elif a.shape[0]>a.shape[1]: # pad width\n n_to_add = a.shape[0]-a.shape[1]\n left_pad = n_to_add//2\n right_pad = n_to_add-left_pad\n a = np.pad(a, [(0, 0), (left_pad, right_pad), (0, 0)], mode='constant')\n else:\n pass\n return a",
"def add_padding(im, pad):\n\n return np.pad(im, pad_width=((pad, pad), (pad, pad), (0, 0)), mode='symmetric')",
"def pad(input, pad, mode='constant', value=0):\n ndim = input.ndimension()\n pads_begin, pads_end = [0] * ndim, [0] * ndim\n for i in range(len(pad) // 2):\n pads_begin[ndim - 1 - i] = pad[i * 2]\n pads_end[ndim - 1 - i] = pad[i * 2 + 1]\n mode = {'constant': 'CONSTANT', 'reflect': 'REFLECT',\n 'replicate': 'EDGE', 'circular': 'EDGE'}[mode]\n return FunctionLib.apply(\n 'Pad', input.device, [input], mode=mode, value=float(value),\n ndim=ndim, pads=pads_begin + pads_end)",
"def padd2square(img, value=0):\n h, w, c = img.shape\n if h >= w:\n squareImg = np.ones((h, h, c), dtype=img.dtype) * value\n p = int((h-w)/2)\n squareImg[:, p:p+w, :] = img\n else:\n squareImg = np.ones((w, w, c), dtype=img.dtype) * value\n p = int((w-h)/2)\n squareImg[p:p+h, :, :] = img\n return squareImg",
"def pad_scaled_square(pts, width, r_pad, log=None):\n lib = _initlib()\n p = require(pts, dtype=float64, requirements=['C']) \n inv_width = 1.0/width\n N = len(p)\n dim = 2\n assert(p.shape[1]==dim)\n \n fudge = 2.0 \n guess_images = int(fudge*N*dim*r_pad/width) + 1\n \n new_pos = empty((N+guess_images,2), dtype=float64)\n pad_idx = empty((guess_images,), dtype=int64)\n \n new_ims = lib.pad_square(inv_width, r_pad, N, p, new_pos, pad_idx, guess_images)\n while new_ims<0:\n guess_images *= 2\n if log is not None:\n print('Too many images! Doubling size...', file=log)\n\n new_pos = empty((N+guess_images,2), dtype=float64)\n pad_idx = empty((guess_images,), dtype=int64)\n new_ims = lib.pad_square(inv_width, r_pad, N, p, new_pos, pad_idx, guess_images)\n \n if log is not None:\n print('{:,} images (c.f. guessed={:,})'.format(new_ims, guess_images), file=log)\n\n # Arrays contain only used vals\n new_pos = new_pos[:N+new_ims] # originals+images\n pad_idx = pad_idx[:new_ims] # images\n return pad_idx, new_pos",
"def resize_image_to_square(img, side, pad_cval=0, dtype=np.float64):\n\n if len(img.shape) == 2:\n h, w = img.shape\n if h == w:\n padded = img.copy()\n elif h > w:\n padded = np.full((h, h), pad_cval, dtype=dtype)\n l = int(h / 2 - w / 2) # guaranteed to be non-negative\n r = l + w\n padded[:, l:r] = img.copy()\n else:\n padded = np.full((w, w), pad_cval, dtype=dtype)\n l = int(w / 2 - h / 2) # guaranteed to be non-negative\n r = l + h\n padded[l:r, :] = img.copy()\n elif len(img.shape) == 3:\n h, w, ch = img.shape\n if h == w:\n padded = img.copy()\n elif h > w:\n padded = np.full((h, h, ch), pad_cval, dtype=dtype)\n l = int(h / 2 - w / 2) # guaranteed to be non-negative\n r = l + w\n padded[:, l:r, :] = img.copy()\n else:\n padded = np.full((w, w, ch), pad_cval, dtype=dtype)\n l = int(w / 2 - h / 2) # guaranteed to be non-negative\n r = l + h\n padded[l:r, :, :] = img.copy()\n else:\n raise Exception('only images of 2d and 3d shape are accepted')\n\n resized_img = resize(padded, output_shape=(side, side))\n\n return resized_img",
"def image_pad(image, pad_width=None, axis=0, mode='symmetric'):\n hei, wid = image.shape[0], image.shape[1]\n\n if pad_width is None:\n th = hei // 10\n tw = wid // 10\n pad_width = ((th, th), (tw, tw), (0, 0))\n if axis == 0:\n if type(pad_width[0]) == tuple:\n pad_width = (pad_width[0], (0, 0), (0, 0))\n else:\n pad_width = (pad_width, (0, 0), (0, 0))\n if axis == 1:\n if type(pad_width[0]) == tuple:\n pad_width = ((0, 0), pad_width[1], (0, 0))\n else:\n pad_width = ((0, 0), pad_width, (0, 0))\n if len(image.shape) == 3:\n newimage = np.pad(image, pad_width, mode)\n elif len(image.shape) == 2:\n newimage = np.squeeze(np.pad(image[:, :, np.newaxis], pad_width, mode))\n\n return cv2.resize(newimage, (wid, hei), interpolation=cv2.INTER_NEAREST)",
"def padImage(image, padList):\r\n\r\n #pad along far x:<---->\r\n padFarX= np.zeros((image.shape[0], image.shape[1], padList[0]))\r\n image= np.concatenate((image, padFarX), axis=2)\r\n\r\n #pad along far y\r\n padFarY= np.zeros((image.shape[0], padList[1], image.shape[2]))\r\n image= np.concatenate((image, padFarY), axis=1)\r\n\r\n #pad along far z\r\n padFarZ= np.zeros((padList[2], image.shape[1], image.shape[2]))\r\n image= np.concatenate((image, padFarZ), axis=0)\r\n\r\n #pad along close x, adjust center\r\n padCloseX= np.zeros((image.shape[0], image.shape[1], padList[3]))\r\n image= np.concatenate((padCloseX, image), axis=2)\r\n\r\n #pad along close y adjust center\r\n padCloseY= np.zeros((image.shape[0], padList[4], image.shape[2]))\r\n image= np.concatenate((padCloseY, image), axis=1)\r\n\r\n #pad along close z, adjust center\r\n padCloseZ= np.zeros((padList[5], image.shape[1], image.shape[2]))\r\n image= np.concatenate((padCloseZ, image), axis=0)\r\n\r\n\r\n #print \"PADDED IMAGE SHAPE: \" + str(image.shape)\r\n return image",
"def pad_image(input_img, window_size, padding_mode='symmetric'):\n assert np.isscalar(window_size)\n assert window_size % 2 == 1\n\n # Padding width must be window_size-1 and divided by 2. So that we can check every pixels\n pad_width = int((window_size-1)/2)\n # For each padding_mode, pad differently\n\n # But in result, I chose symmetric cause it seems to have smallest aepe\n if padding_mode == 'symmetric':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n elif padding_mode == 'reflect':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n elif padding_mode == 'constant':\n padded_img = np.pad(input_img, pad_width, padding_mode)\n\n return padded_img",
"def pad_same(width, kernel, slide):\n res = (width - kernel) / slide + 1\n pad = (width - res) / 2\n return pad",
"def padding(img, n):\n img = np.pad(img, [(n, n), (n, n)], mode='constant', constant_values=0)\n\n return img",
"def pad_image(img, target_size):\r\n rows_missing = target_size[0] - img.shape[2]\r\n cols_missing = target_size[1] - img.shape[3]\r\n padded_img = np.pad(img, ((0, 0), (0, 0), (0, rows_missing), (0, cols_missing)), 'constant')\r\n return padded_img",
"def padding(image, padded_size):\n image_row, image_col = image.shape #asigna alto y ancho de la imagen \n\n padded_image = np.zeros((image_row + padded_size*2, image_col + padded_size*2)) #matriz de imagen con padding en zeros\n print(\"Padded image zeros:\")\n print(padded_image)\n\n padded_image[padded_size:padded_size + image_row, padded_size:padded_size + image_col] = image #matriz de imagen con padding\n print(\"Padded image:\")\n print(padded_image)\n\n \n return padded_image",
"def _pad_image(self, img: ndarray, pad_width: int = 10) -> ndarray:\n self.padded_img = np.zeros(\n (img.shape[0] + pad_width*2, img.shape[1]+pad_width*2))\n self.padded_img[pad_width:-pad_width, pad_width:-pad_width] = img\n return self.padded_img",
"def pad_image(img, output_path, pad_size=[8,8,8,8], buckets=None):\n top, left, bottom, right = pad_size\n old_im = Image.open(img)\n old_size = (old_im.size[0] + left + right, old_im.size[1] + top + bottom)\n new_size = get_new_size(old_size, buckets)\n new_im = Image.new(\"RGB\", new_size, (255,255,255))\n new_im.paste(old_im, (left, top))\n new_im.save(output_path)",
"def pad(size, value):\n return (value + size - 1)/size*size",
"def pad_to(image,w,h):\n iw,ih = image.shape\n wd = int(w-iw)\n assert wd>=0\n w0 = wd/2\n w1 = wd-w0\n hd = int(h-ih)\n assert hd>=0\n h0 = hd/2\n h1 = hd-h0\n result = zeros((w,h))\n result[w0:w0+iw,h0:h0+ih] = image\n return result",
"def test_pad_8():\n paddle.disable_static()\n x = np.array([[[[1.0, 3.0], [-3.0, 1.0]]]])\n pad = [1, 1, 1, 2]\n mode = \"constant\"\n value = np.array(2.0)\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 1.0, 3.0, 2.0],\n [2.0, -3.0, 1.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ]\n ]\n ]\n )\n exp = paddle.nn.functional.pad(\n x=paddle.to_tensor(x), pad=pad, mode=mode, value=paddle.to_tensor(value), data_format=data_format\n )\n assert np.allclose(exp.numpy(), res)",
"def pad(img, pad_size=32):\n\n if pad_size == 0:\n return img\n\n height, width = img.shape[:2]\n\n if height % pad_size == 0:\n y_min_pad = 0\n y_max_pad = 0\n else:\n y_pad = pad_size - height % pad_size\n y_min_pad = int(y_pad / 2)\n y_max_pad = y_pad - y_min_pad\n\n if width % pad_size == 0:\n x_min_pad = 0\n x_max_pad = 0\n else:\n x_pad = pad_size - width % pad_size\n x_min_pad = int(x_pad / 2)\n x_max_pad = x_pad - x_min_pad\n\n img = cv2.copyMakeBorder(img, y_min_pad, y_max_pad, x_min_pad, x_max_pad, cv2.BORDER_REFLECT_101)\n\n return img, (x_min_pad, y_min_pad, x_max_pad, y_max_pad)",
"def pad_square(box):\n x, y, xw, yh = box\n w = xw-x\n h = yh-y\n if w < h:\n w = h\n elif h < w:\n h = w\n return [x, y, x+w, y+h]",
"def pad_im(im, factor, value=0):\n height = im.shape[0]\n width = im.shape[1]\n\n pad_height = int(np.ceil(height / float(factor)) * factor - height)\n pad_width = int(np.ceil(width / float(factor)) * factor - width)\n\n if len(im.shape) == 3:\n return np.lib.pad(\n im, ((0, pad_height), (0, pad_width), (0, 0)),\n 'constant',\n constant_values=value)\n elif len(im.shape) == 2:\n return np.lib.pad(\n im, ((0, pad_height), (0, pad_width)),\n 'constant',\n constant_values=value)",
"def pad_img(image, label):\n paddings = [[2,2],[2,2],[0,0]]\n return tf.pad(image, paddings, mode=\"CONSTANT\", constant_values=0.0), label",
"def image_pad(image, pixel_loc_x, pixel_loc_y):\r\n input_size = np.shape(image)\r\n padded_image = np.zeros((input_size[0]+200, input_size[1]+200, 1))\r\n if np.size(input_size) == 2:\r\n padded_image[:, :, 0] = skut.pad(image[:, :], 100, mode='constant', constant_values=float(0))\r\n else:\r\n for i in range(input_size[2]):\r\n if i == 0:\r\n padded_image[:, :, 0] = skut.pad(image[:, :, i], 100, mode='constant', constant_values=float(0))\r\n else:\r\n padded_dim = np.zeros((input_size[0]+200, input_size[1]+200, 1))\r\n padded_dim[:, :, 0] = skut.pad(image[:, :, i], 100, mode='constant', constant_values=float(0))\r\n padded_image = np.append(padded_image, padded_dim, axis=2)\r\n pixel_loc_x = pixel_loc_x + 100\r\n pixel_loc_y = pixel_loc_y + 100\r\n return padded_image, pixel_loc_x, pixel_loc_y",
"def pad(self, pad_size, symmetric=False):\n samples_ndim = self._samples.ndim\n if samples_ndim == 1:\n pad_width = pad_size if symmetric else (0, pad_size)\n elif samples_ndim == 2:\n # pad samples, keep channels\n pad_width = ((pad_size, pad_size), (0, 0)) if symmetric else ((0, pad_size), (0, 0))\n else:\n raise NotImplementedError(\n f\"Padding not implemented for signals with more that 2 dimensions. Current samples dimension: {samples_ndim}.\"\n )\n # apply padding\n self._samples = np.pad(self._samples, pad_width, mode='constant',)",
"def pil_pad_image(img, v_pad_before, v_pad_after, h_pad_before, h_pad_after, cval=None):\n # type: (PImage.Image, int, int, int, int, tuple) -> PImage.Image\n\n width = img.width + h_pad_before + h_pad_after\n height = img.height + v_pad_before + v_pad_after\n mode = img.mode\n\n if width == img.width and height == img.height:\n return img\n\n # Make sure the cval is in the correct format if None default to black\n if cval is not None:\n if isinstance(cval, float):\n cval = int(round(cval))\n elif isinstance(cval, int):\n cval = cval\n else:\n cval = np.round(cval).astype(dtype=np.int32)\n cval = tuple(cval)\n else:\n cval = 0\n\n try:\n padded_img = PImage.new(mode=mode, size=(width, height), color=cval)\n padded_img.paste(img, box=(h_pad_before, v_pad_before))\n except TypeError as e:\n print 'ERROR: Could not create new PIL image PImage.new(mode={}, size={}, color={}), error: {}'.format(mode, (width, height), cval, e.message)\n raise e\n\n return padded_img",
"def pad(img, padding, fill=0, padding_mode='constant'):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n if not isinstance(padding, (numbers.Number, tuple)):\n raise TypeError('Got inappropriate padding arg')\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError('Got inappropriate fill arg')\n if not isinstance(padding_mode, str):\n raise TypeError('Got inappropriate padding_mode arg')\n\n if isinstance(padding, Sequence) and len(padding) not in [2, 4]:\n raise ValueError(\"Padding must be an int or a 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \\\n 'Padding mode should be either constant, edge, reflect or symmetric'\n\n # if padding_mode == 'constant':\n # aug = iaa.Pad(px=padding, pad_mode=padding_mode, pad_cval=fill, keep_size=False)\n # return aug.augment_image(img)\n # else:\n if isinstance(padding, int):\n pad_left = pad_right = pad_top = pad_bottom = padding\n if isinstance(padding, Sequence) and len(padding) == 2:\n pad_top = pad_bottom = padding[0]\n pad_left = pad_right = padding[1]\n if isinstance(padding, Sequence) and len(padding) == 4:\n pad_top = padding[0]\n pad_left = padding[1]\n pad_bottom = padding[2]\n pad_right = padding[3]\n\n aug = iaa.CropAndPad(px=(pad_top, pad_right, pad_bottom, pad_left), pad_mode=padding_mode, pad_cval=fill,\n keep_size=False)\n # aug = iaa.CropAndPad(px=(pad_top, pad_right, pad_bottom, pad_left), pad_mode=padding_mode, keep_size=False)\n return aug.augment_image(img)\n\n # # RGB image\n # if len(img.shape) == 3:\n # aug = iaa.Pad(px=((pad_top, pad_bottom), (pad_left, pad_right)),\n # pad_mode=padding_mode, keep_size=False)\n # return aug.augment_image(img)\n # # Grayscale image\n # if len(img.shape) == 2:\n # aug = iaa.Pad(px=((pad_top, pad_bottom), (pad_left, pad_right)),\n # pad_mode=padding_mode, keep_size=False)\n # return aug.augment_image(img)\n\n # return img",
"def _pad_img(self, results):\n pad_val = self.pad_val.get('img', 0)\n for key in results.get('img_fields', ['img']):\n if self.pad_to_square:\n max_size = max(results[key].shape[:2])\n self.size = (max_size, max_size)\n if self.size is not None:\n padded_img = general_ocr.impad(\n results[key], shape=self.size, pad_val=pad_val)\n elif self.size_divisor is not None:\n padded_img = general_ocr.impad_to_multiple(\n results[key], self.size_divisor, pad_val=pad_val)\n results[key] = padded_img\n results['pad_shape'] = padded_img.shape\n results['pad_fixed_size'] = self.size\n results['pad_size_divisor'] = self.size_divisor",
"def pad(x, system_shape, pad_size):\n res = unpad(tf.tile(x, (1,)+(3,)*len(pad_size)),\n tuple(s-p for s, p in zip(system_shape, pad_size)))\n return res",
"def pad(self, *args, **kwargs):\n return _image.image_pad(self, *args, **kwargs)"
]
| [
"0.7438983",
"0.706913",
"0.69397676",
"0.6717907",
"0.6487376",
"0.644641",
"0.63947856",
"0.6394612",
"0.6323862",
"0.63060987",
"0.62543195",
"0.6251084",
"0.6170857",
"0.6157992",
"0.61550736",
"0.61489666",
"0.6144223",
"0.6112948",
"0.605924",
"0.60216105",
"0.59898394",
"0.59822536",
"0.59706366",
"0.5968293",
"0.59462255",
"0.5946193",
"0.59348077",
"0.5922499",
"0.59009737",
"0.58964473"
]
| 0.7703733 | 0 |
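A usage sketch for the pad_to_square() defined in this record, assuming torch is installed and the function is in scope; the input shape is an arbitrary example:

import torch

img = torch.zeros(3, 100, 60)                    # C x H x W, taller than wide
padded, pad = pad_to_square(img, pad_value=0.0)  # function from the record above
print(padded.shape)                              # torch.Size([3, 100, 100])
print(pad)                                       # (20, 20, 0, 0): only the width is padded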
Interpolation to use for the resizing. | def interpolation(self) -> int:
return self._interpolation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interpolation(self):\n return self._image.interpolation",
"def interpolation(self):\n return self._interpolation",
"def GetInterpolation(self, *args, **kwargs):\n pass",
"def SetInterpolation(self, *args, **kwargs):\n pass",
"def interpolate(self, image):\n return",
"def resize(self, image, fx, fy, interpolation):\n\n if interpolation == 'bilinear':\n return self.bilinear_interpolation(image, fx, fy)\n\n elif interpolation == 'nearest_neighbor':\n return self.nearest_neighbor(image, fx, fy)",
"def interpolate(\n input,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=False,\n):\n if size is not None:\n size = nest.flatten(size)\n if scale_factor is not None:\n scale_factor = nest.flatten(scale_factor)\n mode = mode.upper()\n mode = mode.replace('BILINEAR', 'LINEAR')\n mode = mode.replace('TRILINEAR', 'LINEAR')\n return FunctionLib.apply(\n 'Resize', input.device, [input],\n mode=mode, align_corners=align_corners,\n num_sizes=len(size) if size is not None else 0,\n num_scales=len(scale_factor) if scale_factor is not None else 0,\n sizes=size, scales=scale_factor)",
"def _get_interpolation(self) :\n \n return self._interpolation",
"def interpolate(self):\n interp = (\n self._get_ticks() - self._last_update\n ) / self._tick_step / self.dilation\n if interp > 1.0:\n interp = 1.0\n return interp",
"def resize(self, image, fx=None, fy=None, interpolation=None):\r\n if interpolation == 'bilinear':\r\n return self.bilinear_interpolation(image, float(fx), float(fy))\r\n\r\n elif interpolation == 'nearest_neighbor':\r\n return self.nearest_neighbor(image, float(fx), float(fy))",
"def resize(self, image, fx=None, fy=None, interpolation=None):\n if interpolation == 'bilinear':\n return self.bilinear_interpolation(image, float(fx), float(fy))\n\n elif interpolation == 'nearest_neighbor':\n return self.nearest_neighbor(image, float(fx), float(fy))",
"def SetInterpolationQuality(*args, **kwargs):\n return _gdi_.GraphicsContext_SetInterpolationQuality(*args, **kwargs)",
"def interpolation_mode(self):\n return self._interpolation_mode",
"def _interpolate_bg(self, bg, size:tuple):\n\t\tbg = torch.from_numpy(bg).float().unsqueeze(0)/255.\n\t\tbg = F.interpolate(bg, size=size, mode='bilinear', align_corners=False)\n\t\treturn (bg*255.).byte().squeeze(0).numpy()",
"def scale(self):",
"def resize(orig, factor, method=\"nearest\"):\r\n method_dict = {'nearest': 0, 'bilinear': 1, 'cubic': 2}\r\n if method.lower() not in method_dict:\r\n raise ValueError(\"Invalid interpolation method. Options are: \" + \", \".join(method_dict.keys()))\r\n try:\r\n return zoom(orig, factor, order=method_dict[method.lower()])\r\n except RuntimeError:\r\n # raised by zoom when factor length does not match orig.shape length\r\n raise ValueError(\"Factor sequence length does not match input length\")",
"def resize_like(x: Tensor, target: Tensor, mode: str = \"bilinear\", align_corners: Union[bool, None] = True) -> Tensor:\n return torch.nn.functional.interpolate(x, target.size()[2:], mode=mode, align_corners=align_corners)",
"def interpolate(self, interpolation=\"nearest\", **kwargs):\n return podpac.interpolators.Interpolate(source=self, interpolation=interpolation, **kwargs)",
"def interpolate(self, distance, normalized=...): # -> BaseGeometry:\n ...",
"def resize(self):\n e = self.e\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n e = self.er\n self.dsize = numpy.clip((self.np_req/self.np)**(1./e), 1/self.r, self.r)\n self.size *= self.dsize",
"def interpolated(self, Any, Any_1, p_float): # real signature unknown; restored from __doc__\n pass",
"def _resize_short_within(self, img, short, max_size, mult_base=1, interp=Image.BILINEAR):\n w, h = img.size\n im_size_min, im_size_max = (h, w) if w > h else (w, h)\n scale = float(short) / float(im_size_min)\n if np.round(scale * im_size_max / mult_base) * mult_base > max_size:\n # fit in max_size\n scale = float(np.floor(max_size / mult_base) * mult_base) / float(im_size_max)\n new_w, new_h = (int(np.round(w * scale / mult_base) * mult_base),\n int(np.round(h * scale / mult_base) * mult_base))\n img = img.resize((new_w, new_h), interp)\n return img",
"def uses_interpolation(self) -> bool:\r\n return False",
"def upsample_bilinear(input, size=None, scale_factor=None):\n return interpolate(input, size, scale_factor, 'linear', align_corners=True)",
"def resize(self, width, height):\n\t\tself._set_image(\n\t\t\tSolidColorImagePattern(\n\t\t\t\tcolor=(self._r,self._g,self._b,self._a)\n\t\t\t).create_image(width, height)\n\t\t)",
"def resizePreview(self):\n ratio = float(self.qIma.width()) / float(self.qIma.height())\n if self.qIma.width() > self.qIma.height():\n width = 300\n height = int(float(width) / ratio)\n else:\n height = 170\n width = int(float(height) / ratio)\n if 'prodManager' in os.path.basename(self._ima):\n width = 300\n height = 170\n self.lPreview.setMinimumSize(width, height)\n self.lPreview.setMaximumSize(width, height)",
"def SetInterpolation(self, interp_type):\n return _hypre.HypreBoomerAMG_SetInterpolation(self, interp_type)",
"def _interpolation(self, video):\n self.F_int = []\n self.mgrid_0 = []\n self.mgrid_1 = []\n for p in range(video.points.shape[0]):\n _m_0, _m_1 = np.meshgrid(self.extended_points_0[p], self.extended_points_1[p])\n _F_int = interp2d(self.extended_points_0[p], self.extended_points_1[p], video.mraw[0, _m_0, _m_1], kind='cubic')\n self.F_int.append(_F_int)\n\n m_0, m_1 = np.meshgrid(self.extended_points_0[p, self.pad:-self.pad], self.extended_points_1[p, self.pad:-self.pad])\n self.mgrid_0.append(m_0)\n self.mgrid_1.append(m_1)",
"def img_zoom(img, fx, fy, interp=cv2.INTER_AREA):\n res = cv2.resize(img, None, fx=fx, fy=fy,\n interpolation=interp)\n return res",
"def test_interpolation_combobox(qtbot):\n layer = Image(np.random.rand(8, 8))\n qtctrl = QtImageControls(layer)\n qtbot.addWidget(qtctrl)\n combo = qtctrl.interpComboBox\n opts = {combo.itemText(i) for i in range(combo.count())}\n assert opts == {'bicubic', 'bilinear', 'kaiser', 'nearest', 'spline36'}\n # programmatically adding approved interpolation works\n layer.interpolation = 'lanczos'\n assert combo.findText('lanczos') == 5"
]
| [
"0.6862421",
"0.67051554",
"0.66868347",
"0.6675438",
"0.65176684",
"0.6433487",
"0.6390173",
"0.6389773",
"0.6385963",
"0.6005822",
"0.5961178",
"0.5899193",
"0.58686846",
"0.5786912",
"0.57550836",
"0.5739371",
"0.57344615",
"0.5728818",
"0.57154495",
"0.5625937",
"0.56257445",
"0.5599057",
"0.5594488",
"0.5592649",
"0.5568518",
"0.55680084",
"0.5567505",
"0.55104977",
"0.5487192",
"0.54214483"
]
| 0.6812813 | 1 |
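The record above exposes the resize flag through a read-only @property. A self-contained sketch of the same pattern (class name and default value are hypothetical; 1 happens to equal cv2.INTER_LINEAR, but any integer flag works the same way):

class Resizer:
    def __init__(self, interpolation: int = 1):
        self._interpolation = interpolation

    @property
    def interpolation(self) -> int:
        # Read-only: no setter is defined, so the flag is fixed at construction time.
        return self._interpolation

print(Resizer().interpolation)  # 1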
Forward method in which to apply the trafo and thresholding. Pre-threshold, modify, and post-threshold the given mask(s). The thresholding is applied if the corresponding | def __call__(self, masks: torch.Tensor) -> torch.Tensor:
if self.pre_thresholder is not None:
masks = self.pre_thresholder(masks)
if not self.batch_wise:
masks: torch.Tensor = masks.unsqueeze(0)
modified_masks: torch.Tensor = self.trafo(masks)
if not self.batch_wise:
modified_masks = modified_masks.squeeze(0)
if self.post_thresholder is not None:
modified_masks = self.post_thresholder(modified_masks)
return modified_masks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(self, x, mask):\n context_vector, attn_weights = self.self_mha(x, x, x, mask)\n x = self.layer_norm1(\n F.dropout(x + context_vector, self.dropout, training=self.training))\n\n x = self.layer_norm2(\n F.dropout(x + self.ffn(x), self.dropout, training=self.training))\n return x, attn_weights",
"def forward_prop(net, input_values, threshold_fn=stairstep):\n raise NotImplementedError",
"def add_partial_mask(self, x: int, y: int, model_output: Dict[str, torch.Tensor], threshold: float) -> None:\n\n masks = model_output['masks'].detach().cpu().numpy()\n\n for i in range(masks.shape[0]):\n try:\n if model_output['scores'][i] < threshold:\n continue\n except KeyError:\n pass\n\n\n if masks.ndim == 4:\n self.colormask[0, x:x + masks.shape[-2], y:y + masks.shape[-1]][masks[i, 0, :, :] > 0.5] = \\\n self.simple_colors[model_output['labels'][i] - 1][0]\n self.colormask[1, x:x + masks.shape[-2], y:y + masks.shape[-1]][masks[i, 0, :, :] > 0.5] = \\\n self.simple_colors[model_output['labels'][i] - 1][1]\n self.colormask[2, x:x + masks.shape[-2], y:y + masks.shape[-1]][masks[i, 0, :, :] > 0.5] = \\\n self.simple_colors[model_output['labels'][i] - 1][2]\n else:\n self.colormask[x:x + masks.shape[1] - 1, y:y + masks.shape[2] - 1, 0][masks[i, :, :] > 0.5] = \\\n self.simple_colors[model_output['labels'][i] - 1][0]\n self.colormask[x:x + masks.shape[1] - 1, y:y + masks.shape[2] - 1, 1][masks[i, :, :] > 0.5] = \\\n self.simple_colors[model_output['labels'][i] - 1][1]\n self.colormask[x:x + masks.shape[1] - 1, y:y + masks.shape[2] - 1, 2][masks[i, :, :] > 0.5] = \\\n self.simple_colors[model_output['labels'][i] - 1][2]",
"def forward(self,\n pos_outputs: torch.Tensor,\n neg_outputs: torch.Tensor,\n mask: torch.Tensor = None) -> torch.Tensor:\n # Calculate loss by functional method\n loss = adaptive_hinge_loss(pos_outputs, neg_outputs, self.margin)\n\n # Apply masking and take reduction on loss\n return self.reduction(apply_mask(loss, mask)) if mask is not None \\\n else self.reduction(loss)",
"def forward(self, pred, gt, mask=None):\n pos_inds = gt.eq(1).float()\n neg_inds = gt.lt(1).float()\n if mask is not None:\n pos_inds = pos_inds * mask\n neg_inds = neg_inds * mask\n neg_weights = torch.pow(1 - gt, self.beta)\n loss = 0\n pos_loss = torch.log(pred) * torch.pow(1 - pred, self.alpha) * pos_inds\n neg_loss = torch.log(1 - pred) * torch.pow(pred, self.alpha) * neg_weights * neg_inds\n num_pos = pos_inds.float().sum()\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n if num_pos == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss",
"def forward(self,\n pos_outputs: torch.Tensor,\n neg_outputs: torch.Tensor,\n mask: torch.Tensor = None) -> torch.Tensor:\n # Calculate loss by functional method\n loss = hinge_loss(pos_outputs, neg_outputs, self.margin)\n\n # Apply masking and take reduction on loss\n return self.reduction(apply_mask(loss, mask)) if mask is not None \\\n else self.reduction(loss)",
"def global_threshold(img, threshold_method):\n pass",
"def forward(self, x, x_mask):\n # No padding necessary.\n if x_mask.data.sum() == 0:\n return self._forward_unpadded(x, x_mask)\n # Pad if we care or if its during eval.\n if self.padding or not self.training:\n return self._forward_padded(x, x_mask)\n # We don't care.\n return self._forward_unpadded(x, x_mask)",
"def forward(self, x: torch.Tensor, x_mask: torch.Tensor, proto: torch.Tensor = None) -> torch.Tensor:\n feature = self.features(x, x_mask)\n if len(self.old_cols) > 0:\n with torch.no_grad():\n fc1_kb = [old(feature) for old in self.old_fc1s]\n feature = F.relu(self.fc1(feature))\n y = self.adaptor1(torch.cat(fc1_kb, 1))\n out = self.classifier(feature, proto) + y\n else:\n feature = F.relu(self.fc1(feature))\n out = self.classifier(feature, proto)\n return out",
"def _forward_mask(self, features: Dict[str, torch.Tensor], instances: List[Instances]):\n if not self.mask_on:\n return {} if self.training else instances\n\n if self.training:\n # head is only trained on positive proposals.\n instances, _ = select_foreground_proposals(instances, self.num_classes)\n\n if self.mask_pooler is not None:\n features = [features[f] for f in self.mask_in_features]\n boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances]\n features = self.mask_pooler(features, boxes)\n else:\n features = {f: features[f] for f in self.mask_in_features}\n return self.mask_head(features, instances)",
"def threshold_mask(mask, threshold=0.5):\n mask[np.where(mask >= threshold)] = 1.\n mask[np.where(mask < threshold)] = 0.\n return mask",
"def forward(self, x: Tensor, mask: Tensor) -> Tensor:\n x_norm = self.layer_norm(x)\n h = self.src_src_att(x_norm, x_norm, x_norm, mask)\n h = self.dropout(h) + x\n o = self.feed_forward(h)\n return o",
"def forward(self, x):\n\n x_4 = x[0]\n x_p4 = self.P4_conv(x_4)\n x_4_1x7 = self.channel4_1x7_conv(x_4)\n x_p4_1x7 = self.P4_1x7_conv(x_p4)\n x_4 = x_p4_1x7 + x_p4 + x_4_1x7\n x_4 = self.rpn4(x_4)\n\n # predict results of aligned cell region mask and global pyramid mask\n mask_pred = self.conv_logits_seg(x_4)\n reg_pred = self.conv_logits_reg(x_4)\n\n # If upsample is defined, 4x feature maps will be upsampled to 1x feature maps for training\n if self.upsample is not None:\n assert self.upsample_ratio == 4, \"Only support 4x upsample currently\"\n mask_pred = self.upsample(mask_pred)\n\n return mask_pred, reg_pred",
"def sum_threshold(data, plot_progress=False, verbose=False):\n\n thr_f = params.thr_f\n thr_t = params.thr_t\n scales = params.scales\n rho = params.rho \n \n mask = np.copy(data.mask)\n \n thr1_f = thr_f\n thr1_t = thr_t\n \n # do first stage of flagging:\n mask_f = np.greater_equal(np.abs(data-1), thr_f)\n mask_t = np.greater_equal(np.abs(data-1), thr_t)\n #mask_b = np.greater_equal(np.abs(summed_b-1), np.sqrt(thr_f * thr_t))\n mask_s = np.logical_or(mask_f, mask_t)\n #mask_s = np.logical_or(mask_s, mask_b) \n mask = np.logical_or(data.mask, mask_s)\n data[mask] = np.sqrt(thr_f * thr_t)\n \n for window in scales:\n \n thr_f = thr1_f / np.power(rho, np.log2(window))\n thr_t = thr1_t / np.power(rho, np.log2(window))\n \n if window > 1:\n summed_f = filter(data, window, axis=1)\n summed_t = filter(data, window, axis=0)\n #summed_b = filter(summed_f, int(np.sqrt(window)), axis=0, use_bn=use_bn)\n \n mask_f = np.greater_equal(np.abs(summed_f-1), thr_f)\n mask_t = np.greater_equal(np.abs(summed_t-1), thr_t)\n #mask_b = np.greater_equal(np.abs(summed_b-1), np.sqrt(thr_f * thr_t))\n mask_s = np.logical_or(mask_f, mask_t)\n #mask_s = np.logical_or(mask_s, mask_b) \n mask = np.logical_or(data.mask, mask_s)\n data[mask] = 1 + np.sqrt(thr_f * thr_t)\n data.mask = mask\n else:\n summed_f = data\n summed_t = data\n\n if verbose:\n print \"M: %i, Xi_f: %2.2e, Xi_t: %2.2e\" % (window, thr_f, thr_t)\n\n if plot_progress:\n plt.figure()\n plt.subplot(221)\n plt.title(\"summed f: %i\" % window)\n plt.imshow(summed_f, aspect='auto', interpolation='none', rasterized=True)\n plt.colorbar()\n plt.subplot(222)\n plt.title(\"summed t: %i\" % window)\n plt.imshow(summed_t, aspect='auto', interpolation='none', rasterized=True)\n plt.colorbar()\n plt.subplot(223)\n plt.title(\"flagged: %i\" % window)\n plt.imshow(data, aspect='auto', interpolation='none', rasterized=True)\n plt.colorbar()\n if plot_progress:\n plt.show()\n \n return data.mask",
"def postprocessing(self, prediction, prob_thresh=0.5):\n prob_map = self._np_sigmoid(prediction)\n prob_map = self._np_merge_prediction(prob_map)\n if self.resize :\n prob_map = self._np_resize_image(prob_map,\n self.orig_size,\n dtype='float')\n mask = self._np_get_mask(prob_map, prob_thresh=prob_thresh)\n return mask",
"def forward(self, context: Tensor, box: Tensor, mask: Tensor, xy_pdf_score: Tensor) -> Tensor:\n \n# print('context:',context.shape)\n# print('box:',box.shape)\n context_norm = self.layer_norm(context)\n# print('context_norm:',context_norm.shape)\n box_norm = self.box_norm(box)\n# print('box_norm:',box_norm.shape)\n # Calculate xy_pdf_score\n# combine_contex = torch.cat((context_norm, box_norm), dim=-1)\n# combine_contex = self.combine_layer(combine_contex)\n # k, v, q\n h = self.src_src_att(box_norm, context_norm, box_norm, mask, xy_pdf_score)\n h = self.dropout(h) + context_norm\n o = self.feed_forward(h)\n return o",
"def forward(self, pred, target):\n if self.mask:\n target, mask = target\n # todo: loss with mask\n else:\n # todo: loss w/o mask\n pass\n return loss",
"def forward(self, trg: torch.Tensor, trg_mask: torch.Tensor = None):\n trg = trg + self.self_attention(trg, trg, trg, trg_mask)\n trg = self.norm1(trg)\n trg = trg + self.feedforward(trg)\n trg = self.norm2(trg)\n return trg",
"def forward(self, x, mask):\n \"Pass the input (and mask) through each layer in turn\"\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)",
"def apply_hounsfield_thresholding(data_, threshold: tuple = (200, 600)):\n mask = np.ma.masked_inside(data_, threshold[0], threshold[1], ).mask\n thresholded = np.zeros_like(data_)\n thresholded[mask] = data_[mask]\n return thresholded",
"def add_fe_tcf_suppress(self, suppress_dict):\n ofproto = self.datapath.ofproto\n parser = self.datapath.ofproto_parser\n #*** Check it's TCP:\n if suppress_dict['proto'] != 'tcp':\n self.logger.error(\"Unsupported proto=%s\", suppress_dict['proto'])\n return 0\n\n #*** Convert IP addresses strings to integers:\n ipv4_src = _ipv4_t2i(str(suppress_dict['ip_A']))\n ipv4_dst = _ipv4_t2i(str(suppress_dict['ip_B']))\n\n #*** Build match:\n match = parser.OFPMatch(eth_type=0x0800,\n ipv4_src=ipv4_src,\n ipv4_dst=ipv4_dst,\n ip_proto=6,\n tcp_src=suppress_dict['tp_A'],\n tcp_dst=suppress_dict['tp_B']\n )\n actions = []\n inst = [parser.OFPInstructionActions(\n ofproto.OFPIT_APPLY_ACTIONS, actions),\n parser.OFPInstructionGotoTable(self.ft_tt)]\n #*** Needs higher priority than TC rules in same table:\n priority = 2\n mod = parser.OFPFlowMod(datapath=self.datapath, table_id=self.ft_tcf,\n priority=priority,\n idle_timeout=self.suppress_idle_timeout,\n match=match, instructions=inst)\n self.logger.debug(\"Installing suppress forward FE dpid=%s\", self.dpid)\n self.datapath.send_msg(mod)\n #*** Build counter match (reversed flow):\n match = parser.OFPMatch(eth_type=0x0800,\n ipv4_src=ipv4_dst,\n ipv4_dst=ipv4_src,\n ip_proto=6,\n tcp_src=suppress_dict['tp_B'],\n tcp_dst=suppress_dict['tp_A']\n )\n mod = parser.OFPFlowMod(datapath=self.datapath, table_id=self.ft_tcf,\n priority=priority,\n idle_timeout=self.suppress_idle_timeout,\n match=match, instructions=inst)\n self.logger.debug(\"Installing suppress reverse FE dpid=%s\", self.dpid)\n self.datapath.send_msg(mod)",
"def forward(self, x):\n out = self.conv(x)\n out = out.view(out.size(0), -1)\n\n # critic\n out1 = self.fc1(out)\n\n # auxiliary classifier\n out2 = self.fc10(out)\n\n return out1, out2",
"def forward(self, x, src_states, src_mask, tgt_mask):\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))\n #print('decoder')\n #print(x.shape)\n x = self.sublayer[1](x, lambda x: self.src_attn(x, src_states, src_states, src_mask))\n #print(x.shape)\n return self.sublayer[2](x, self.feed_forward)",
"def apply_thresholding(x):\n return x > threshold_otsu(x)",
"def thresh_setup():\n pass",
"def forward(self, x):\n # sources保存特征图,loc与conf保存所有PriorBox的位置与类别预测特征\n sources = list()\n loc = list()\n conf = list()\n\n # 对输入图像卷积到conv4_3,将特征添加到sources中\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # 继续卷积到conv7,将特征添加到sources中\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # 继续利用额外的卷积层计算,并将特征添加到sources中\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1: # 间隔一层\n sources.append(x)\n\n # 对sources中的特征图利用类别与位置网络进行卷积计算,并保存到loc与conf中\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1, self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n # 对于训练来说,output包括了loc与conf的预测值以及PriorBox的信息\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output",
"def apply_threshold(heatmap, threshold):\n heatmap_thresh = np.copy(heatmap)\n ind = np.where(np.logical_and(heatmap_thresh>1, heatmap_thresh<=threshold))\n heatmap_thresh[ind] = 0\n #heatmap_thresh[(heatmap_thresh <= threshold)] = 0\n return heatmap_thresh",
"def forward(self, x, mask):\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n #print('encoder')\n #print(x.shape)\n return self.sublayer[1](x, self.feed_forward)",
"def forward(self, src, src_mask=None, src_key_padding_mask=None):\n # type: (Tensor, Optional[Tensor], Optional[Tensor]) -> Tensor\n src = self.pre_norm(src)\n\n # Self attention layer\n src2 = src\n src2 = self.self_attn(src2, src2, src2, attn_mask=src_mask,\n key_padding_mask=src_key_padding_mask)\n src2 = src2[0] # no attention weights\n src2 = src2 * self.resweight\n src = src + self.dropout1(src2)\n\n # Pointiwse FF Layer\n if self.factor_ff:\n #src2 = self.fac_linear1(self.dropout(self.activation(self.linear1(src))))\n #src = src + self.dropout2(src2 * self.resweight)\n #src2 = self.linear2(self.dropout(self.activation(self.fac_linear2(src))))\n #src = src + self.dropout2(src2 * self.resweight)\n src2 = self.dropout1(self.fac_linear1(self.activation(self.linear1(src))))\n src2 = self.linear2(self.dropout(self.activation(self.fac_linear2(src2))))\n src = src + self.dropout2(src2 * self.resweight)\n else:\n src2 = src \n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src2 = src2 * self.resweight\n src = src + self.dropout2(src2)\n\n if self.adapter_finetune:\n src2 = src \n src2 = self.ada_linear2(self.ada_dropout1(self.activation(self.ada_linear1(src2))))\n src2 = src2 * self.resweight\n src = src + self.ada_dropout2(src2)\n \n return src",
"def forward(self, x, mask):\n # Compute the parameters for the Bernoulli\n embeddings = self.compute_embedding(x)\n dist_params = self.predict_distribution(embeddings)\n dist_params = dist_params.squeeze() \n # Sample\n sampler = dist.Bernoulli(probs=dist_params)\n actions = sampler.sample()\n \n # Compute LogProba\n log_probas = sampler.log_prob(actions)\n log_probas = apply_mask(log_probas, mask)\n\n # Compute Entropy\n entropy = sampler.entropy()\n entropy = apply_mask(log_probas, mask)\n \n return actions, log_probas, entropy, dist_params"
]
| [
"0.6316641",
"0.624907",
"0.57780576",
"0.5774649",
"0.5764702",
"0.5745862",
"0.57319003",
"0.57285935",
"0.56915194",
"0.56798923",
"0.56551117",
"0.56172776",
"0.5616601",
"0.56086147",
"0.5605365",
"0.56027544",
"0.5601907",
"0.5597806",
"0.5569607",
"0.55548793",
"0.55394894",
"0.5497228",
"0.54831105",
"0.5480723",
"0.54774666",
"0.5469786",
"0.5439313",
"0.54292303",
"0.5426369",
"0.53998536"
]
| 0.6509515 | 0 |
Move ``tens`` tensor to the configured device. | def __call__(self, tens: torch.Tensor):
return tens.to(self.device) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_tensor(tens: Tensorable, device='cpu') -> torch.Tensor:\n if isinstance(tens, torch.Tensor):\n return tens\n return torch.Tensor(tens).to(device)",
"def to(self, device):\n for item in self.data:\n if torch.is_tensor(item):\n item.to(item)\n else:\n for subitem in item:\n subitem.to(device)\n return self",
"def to_device(m: torch.nn.Module, x:torch.Tensor):\n if isinstance(m, torch.nn.Module):\n device = next(m.parameters()).device\n elif isinstance(m, torch.Tensor):\n device = m.device\n else:\n raise TypeError(\n \"Expected torch.nn.Module or torch.tensor, \" f\"bot got: {type(m)}\"\n )\n return x.to(device)",
"def set_device(self, device: torch.Tensor) -> None:\n raise NotImplementedError",
"def to(self, *args, **kwargs):\n self._tensor = self._tensor.to(*args, **kwargs)\n return self",
"def _create_squeeze(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axes', list(op.axis)),\n ])\n return node",
"def change_device(self, device=None):\n\n if device is None:\n # If the function is called without a device, use the current device\n device = self.device\n\n # Create the appropriate device object\n device = torch.device(f'cuda:{device}'\n if torch.cuda.is_available() else 'cpu')\n\n # Change device field\n self.device = device\n # Load the transcription model onto the device\n self.to(self.device)",
"def to(self, dev):\n self.weight = self.weight.to(dev)\n return self",
"def coord(self, tensor: Union[Tensor, np.ndarray]) -> None:\n try:\n tensor = tensor.reshape(self.shape[0], 3)\n except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray\n raise ValueError(f'got unexpected shape {tensor.shape}')\n if not isinstance(tensor, Tensor):\n tensor = self.tensor.new_tensor(tensor)\n self.tensor[:, :3] = tensor",
"def to_device(self, move_optim_data=True):\n if move_optim_data:\n self.optimizer_values = self.optimizer_values.to(\n self._embeddings.weight.device\n )",
"def setTensor(self, tensor):\t\t\n\t\tself.cur_tensor = tensor\n\t\tif tensor is not None:\n\t\t\tself.output_shape[self.cur_id] = self.cur_tensor.size()\n\t\telse:\n\t\t\tself.output_shape[self.cur_id] = None",
"def TensorAdapter(self) -> tensor_adapter.TensorAdapter:\n return tensor_adapter.TensorAdapter(self.TensorAdapterConfig())",
"def tensorize(\n self,\n texts: Optional[List[List[str]]] = None,\n tokens: Optional[List[List[List[str]]]] = None,\n ):\n raise NotImplementedError",
"def squeeze_batch_dim(nest: types.NestedTensor) -> types.NestedTensor:\n return tree.map_structure(lambda x: tf.squeeze(x, axis=0), nest)",
"def to(self, device):\n self.device = device\n self.model.to(self.device)",
"def _batch_to_device(batch, target_device):\n tensor = _getattr(\"torch\", \"Tensor\")\n for key in batch:\n if isinstance(batch[key], tensor):\n batch[key] = batch[key].to(target_device)\n return batch",
"def deploy_to_device(self):\n if self.device_ids is not None and len(self.device_ids) > 1:\n if not isinstance(self.model, torch.nn.DataParallel):\n self.model = torch.nn.DataParallel(self.model, self.device_ids)\n\n self.model = self.model.to(self.device)\n self.criterion = self.criterion.to(self.device)",
"def set_device(sys_device_id):\n device_id = -1\n cuda = (sys_device_id != -1)\n if cuda:\n # CUDA_VISIBLE_DEVICE is a list, and device_id is the index of its members.\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = str(sys_device_id)\n device_id = 0\n TVT = TransferVarTensor(device_id)\n TMO = TransferModulesOptims(device_id)\n return TVT, TMO",
"def batch_to_device(batch):\n for key in batch:\n if isinstance(batch[key], torch.Tensor):\n batch[key] = batch[key].to(device)\n return batch",
"def to_device(data, device):\n if isinstance(data, (list,tuple)): # allows to apply function to lists or tuples of tensors\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)",
"def transpose(tensor):\n raise NotImplementedError",
"def perturb_tensors(self, scaling: str = 'none'):\n if self.clustering is True:\n for cluster in self.clusters:\n cluster.perturb_tensors()\n else:\n for name, item in tqdm(self.tensor_info.items(), 'Perturbing tensors'):\n tens = item[0]\n pert = item[1]\n repr = item[2]\n if repr is not None:\n repr.convert_tensor(tens)\n if pert is not None:\n for perturb in pert:\n if perturb is not None:\n perturb(tens, repr, scaling)",
"def ten2pytrch(img, device):\n img = img[:, 0]\n img = np.transpose(img, [0, 3, 1, 2])\n return torch.from_numpy(img * 2 - 1.0).float().to(device)",
"def tucker_to_tensor(tucker_tensor, skip_factor=None, transpose_factors=False):\n core, factors = tucker_tensor\n return multi_mode_dot(core, factors, skip=skip_factor, transpose=transpose_factors)",
"def transpose(self):\n temp_matrix = [[0] * self.TILES_PER_ROW for _ in range(self.TILES_PER_ROW)]\n for i in range(len(self.main_grid_values)):\n for j in range(len(self.main_grid_values)):\n temp_matrix[j][i] = self.main_grid_values[i][j]\n\n self.main_grid_values = temp_matrix",
"def turn(self):\n new_shape = []\n for shape in self.shape:\n new_shape.append([-shape[1], shape[0]])\n old_shape = self.shape[:]\n self.shape = new_shape\n if not self.board.is_valid_tetromino(self):\n self.shape = old_shape",
"def to_device(self, device):\n for i in range(self.num_layers):\n getattr(self, \"conv{}\".format(i+1)).to_device(device)\n self.to(device)\n return self",
"def tensormul(t1, t2):\n dim1 = t1.get_shape().as_list()[-1]\n dim2 = t2.get_shape().as_list()[-1]\n result_shape_tensors = tf.unstack(tf.shape(t1))\n result_shape_tensors[-1] = dim2\n result_shape_tensor = tf.stack(result_shape_tensors)\n t1 = tf.reshape(t1, [-1, dim1])\n result = tf.matmul(t1, t2)\n result = tf.reshape(result, result_shape_tensors)\n return result",
"def th_to_arr(tens: torch.Tensor) -> np.ndarray:\n return check_tensor(tens).cpu().detach().numpy()",
"def set_devices(sys_device_ids):\n # Set the CUDA_VISIBLE_DEVICES environment variable\n import os\n visible_devices = ''\n for i in sys_device_ids:\n visible_devices += '{}, '.format(i)\n os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices\n # Return wrappers.\n # Models and user defined Variables/Tensors would be transferred to the\n # first device.\n device_id = 0 if len(sys_device_ids) > 0 else -1\n TVT = TransferVarTensor(device_id)\n TMO = TransferModulesOptims(device_id)\n return TVT, TMO"
]
| [
"0.5660149",
"0.5313678",
"0.52696735",
"0.50786",
"0.50667024",
"0.5044768",
"0.4843993",
"0.4843944",
"0.48334154",
"0.48139262",
"0.4801744",
"0.47604313",
"0.47573864",
"0.4742376",
"0.47310045",
"0.47277507",
"0.47152445",
"0.4705565",
"0.46590734",
"0.46470731",
"0.460751",
"0.45996886",
"0.45510596",
"0.4540324",
"0.45318815",
"0.44999897",
"0.44897914",
"0.44880748",
"0.44841057",
"0.4483429"
]
| 0.66221935 | 0 |
Get score for given integral image array. | def get_score(self, int_img):
score = 0
if self.type == FeatureType.TWO_VERTICAL:
first = ii.sum_region(int_img, self.top_left, (self.top_left[0] + self.width, int(self.top_left[1] + self.height / 2)))
second = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + self.height / 2)), self.bottom_right)
score = first - second
elif self.type == FeatureType.TWO_HORIZONTAL:
first = ii.sum_region(int_img, self.top_left, (int(self.top_left[0] + self.width / 2), self.top_left[1] + self.height))
second = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 2), self.top_left[1]), self.bottom_right)
score = first - second
elif self.type == FeatureType.THREE_HORIZONTAL:
first = ii.sum_region(int_img, self.top_left, (int(self.top_left[0] + self.width / 3), self.top_left[1] + self.height))
second = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 3), self.top_left[1]), (int(self.top_left[0] + 2 * self.width / 3), self.top_left[1] + self.height))
third = ii.sum_region(int_img, (int(self.top_left[0] + 2 * self.width / 3), self.top_left[1]), self.bottom_right)
score = first - second + third
elif self.type == FeatureType.THREE_VERTICAL:
first = ii.sum_region(int_img, self.top_left, (self.bottom_right[0], int(self.top_left[1] + self.height / 3)))
second = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + self.height / 3)), (self.bottom_right[0], int(self.top_left[1] + 2 * self.height / 3)))
third = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + 2 * self.height / 3)), self.bottom_right)
score = first - second + third
elif self.type == FeatureType.FOUR:
# top left area
first = ii.sum_region(int_img, self.top_left, (int(self.top_left[0] + self.width / 2), int(self.top_left[1] + self.height / 2)))
# top right area
second = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 2), self.top_left[1]), (self.bottom_right[0], int(self.top_left[1] + self.height / 2)))
# bottom left area
third = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + self.height / 2)), (int(self.top_left[0] + self.width / 2), self.bottom_right[1]))
# bottom right area
fourth = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 2), int(self.top_left[1] + self.height / 2)), self.bottom_right)
score = first - second - third + fourth
return score | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_score(arr, angle):\n data = inter.rotate(arr, angle, reshape=False, order=0)\n hist = np.sum(data, axis=1)\n score = np.sum((hist[1:] - hist[:-1]) ** 2)\n return score",
"def get_score(self, solution: np.array) -> float:\n pass",
"def _compute_score(img_binary: np.ndarray, s: float) -> float:\n img_sheared = _shear_img(img_binary, s, 0)\n h = img_sheared.shape[0]\n\n img_sheared_mask = img_sheared > 0\n first_fg_px = np.argmax(img_sheared_mask, axis=0)\n last_fg_px = h - np.argmax(img_sheared_mask[::-1], axis=0)\n num_fg_px = np.sum(img_sheared_mask, axis=0)\n\n dist_fg_px = last_fg_px - first_fg_px\n col_mask = np.bitwise_and(num_fg_px > 0, dist_fg_px == num_fg_px)\n masked_dist_fg_px = dist_fg_px[col_mask]\n\n score = sum(masked_dist_fg_px ** 2)\n return score",
"def score_game(game_core):\n \n att_counter = [] \n np.random.seed(1) # fix RANDOM SEED so the experiment is reproducible \n random_array = np.random.randint(1,101, size=(1000))\n for number in random_array:\n att_counter.append(game_core(number))\n score = int(np.mean(att_counter))\n print(f\"Your algorithm guesses on average the number in {score} attempts.\")\n return(score)",
"def score(self, x: np.ndarray) -> np.ndarray:\n score = self.backend.score(self.backend._to_backend_dtype(x))\n return self.backend._to_frontend_dtype(score)",
"def predict(img_tensor):\n scores = [F.l1_loss(digit, img_tensor) for digit in platonic_digits]\n\n lowest = 0\n lowest_score = float(\"inf\")\n for i, s in enumerate(scores):\n if s < lowest_score:\n lowest = i\n lowest_score = s\n return lowest",
"def score_pixels(self, img) -> np.ndarray:\n # Settings to run thresholding operations on\n settings = [{'name': 'lab_b', 'cspace': 'LAB', 'channel': 2, 'clipLimit': 2.0, 'threshold': 150},\n {'name': 'value', 'cspace': 'HSV', 'channel': 2, 'clipLimit': 6.0, 'threshold': 220},\n {'name': 'lightness', 'cspace': 'HLS', 'channel': 1, 'clipLimit': 2.0, 'threshold': 210}]\n\n # Perform binary thresholding according to each setting and combine them into one image.\n scores = np.zeros(img.shape[0:2]).astype('uint8')\n for params in settings:\n # Change color space\n color_t = getattr(cv2, 'COLOR_RGB2{}'.format(params['cspace']))\n gray = cv2.cvtColor(img, color_t)[:, :, params['channel']]\n\n # Normalize regions of the image using CLAHE\n clahe = cv2.createCLAHE(params['clipLimit'], tileGridSize=(8, 8))\n norm_img = clahe.apply(gray)\n\n # Threshold to binary\n ret, binary = cv2.threshold(norm_img, params['threshold'], 1, cv2.THRESH_BINARY)\n\n scores += binary\n\n # Save images\n self.viz_save(params['name'], gray)\n self.viz_save(params['name'] + '_binary', binary)\n\n return cv2.normalize(scores, None, 0, 255, cv2.NORM_MINMAX)",
"def sum_img(img_array):\n sum_img = 0\n counter= 0\n for n in range(len(img_array)):\n sum_img += img_array[n]\n counter += 1\n sum_img = np.asarray(sum_img)\n avg_img = sum_img / (counter)\n return sum_img, avg_img",
"def identify_image(im):\n score_cures = np.mean(im[1025:1065, 1130:1180, 0])\n score_ingredients = np.mean(im[1025:1065, 675:720, 0])\n if score_cures < 177.5:\n return 'cures'\n if score_ingredients < 177.5:\n return 'ingredients'\n else:\n return 'other'",
"def iqr(self, arr):\n a = np.asarray(arr)\n self.q1 = stats.scoreatpercentile(a, 25)\n self.q2 = stats.scoreatpercentile(a, 50)\n self.q3 = stats.scoreatpercentile(a, 75)",
"def dice_score(binary_image, binary_control):\n # figure_of_control(binary_control, 'Optimal given threshold')\n match = creation_of_match_array(binary_image, binary_control)\n # figure_of_control(match, 'deviation of optimal threshold and otsu')\n true = sum(sum(match))\n false = np.size(match) - true\n score = 2 * true / (2 * true + false)\n # print(\"True hits: \", true)\n # print(\"False hits: \", false)\n # print('Dice score: ', score)\n return score",
"def get_score_matrix(self) -> int:",
"def integral_accuracy(binned_probs):\n ##TODO: Needs to be updated - bin_width is no longet constant\n x_vals = binned_probs['bin_centre'].values\n y_vals = binned_probs['posterior', 'generator'].values\n\n bin_width = x_vals[1] - x_vals[0]\n integral = np.sum(np.multiply(bin_width, y_vals))\n\n return integral",
"def get_vote(self, int_img):\n score = self.get_score(int_img)\n return self.weight * (1 if score < self.polarity * self.threshold else -1)",
"def pixel_score(self,X,Y):\n pred_Y = self.predict(X)\n score = []\n label_size = self.label_width**2\n for i in range(len(Y)):\n score.append(np.sum(Y[i]==pred_Y[i])/label_size)\n mean_score = np.mean(score)\n return mean_score",
"def integral(img, sqSum = False, tilted = False):\n\tif sqSum is False and tilted is False:\n\t\treturn cv2.integral(img)\n\telif sqSum is True and tilted is False:\n\t\treturn cv2.integral2(img)\n\telif sqSum is True and tilted is True:\n\t\treturn cv2.integral3(img)\n\telif sqSum is False and tilted is True:\n\t\tsu, sqsu, tilt = cv2.integral3(img)\n\t\treturn su, tilt\n\telse:\n\t\treturn cv2.integral(img)",
"def get_score(location, grid, shape):",
"def intensity(self) -> int:",
"def score(self, X, y):\n ...",
"def getScore(data):\n return score",
"def inception_score(images):\n height, width = 90, 90\n\n images = np.array([np.array(Image.fromarray(x, mode=\"RGB\").resize((height, width))) for x in np.reshape(images, (-1, 28, 28, 3))]) / 255. # Transform images to a suitable form\n\n with loaded_model[0].as_default():\n predictions = loaded_model[1].predict(images)\n preds = np.argmax(predictions, axis=1)\n aux_preds = np.zeros(10)\n unique, counts = np.unique(preds, return_counts=True)\n for number, appearances in zip(unique, counts):\n aux_preds[number] = appearances\n aux_preds = aux_preds / predictions.shape[0]\n predictions = np.sort(predictions, axis=1)\n predictions = np.mean(predictions, axis=0)\n\n sam_error = np.sum([aux_preds[w] * np.log(aux_preds[w] / predictions[w]) if aux_preds[w] > 0 else 0 for w in range(predictions.shape[0])])\n\n return sam_error",
"def get_mean_score(rating_scores):\n return sum(rating_scores) / len(rating_scores)",
"def getScore(self, i):\n return self.scores[i - 1]",
"def score(self) -> int:\n return self.function(self.x, self.y)",
"def score(self):",
"def calc_integral(self, occr_array=None, H_array=None):\n\n # Return the cached value if possible\n if self._int_cache_flag:\n return self._int_cache\n\n if occr_array is not None:\n self.occr = occr_array\n\n H_array = self.H_bar_array if H_array is None else H_array\n\n #I = np.sum(self.occr * np.sum(self._cpf_grid*self.calc_bin_volumes(),\n # axis=1))\n\n I = np.sum(self.volumise_occr() * H_array)\n\n self._int_cache_flag = True\n self._int_cache = I\n return I",
"def evaluate(state):\r\n if wins(state, COMP):\r\n score = +1\r\n elif wins(state, HUMAN):\r\n score = -1\r\n else:\r\n score = 0\r\n\r\n return score",
"def get_score(p):\n temp = path[round(p[0], 1), round(p[1], 1)] / a_star\n return (clip(1 - temp, a_min=0, a_max=1) + clip(1 - temp, a_min=0, a_max=1) ** 2) / 2",
"def g_score(self):\n _, _, I_CK = self._entropies()\n return 2.0 * I_CK",
"def __call__(self, pred_texture: Image.Image) -> float:\n from plan2scene.evaluation.metric_impl.tileability_mean_metric import compute_mean_tileability\n score = compute_mean_tileability(img=pred_texture, gaus=self.gaus)\n return score"
]
| [
"0.6570716",
"0.61750484",
"0.613723",
"0.59677094",
"0.5933514",
"0.5926071",
"0.5922975",
"0.5788778",
"0.57714677",
"0.57675093",
"0.5666342",
"0.5610269",
"0.56097615",
"0.5585386",
"0.55841637",
"0.5576786",
"0.5557623",
"0.55392784",
"0.55042106",
"0.5503934",
"0.54904944",
"0.54642653",
"0.545614",
"0.5442064",
"0.54119325",
"0.53716415",
"0.53666884",
"0.5366216",
"0.53655416",
"0.53575337"
]
| 0.6954179 | 0 |
Get vote of this feature for given integral image. | def get_vote(self, int_img):
score = self.get_score(int_img)
return self.weight * (1 if score < self.polarity * self.threshold else -1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _getIntFeature(self):\n\n # create args\n valueToGet = c_int64()\n\n errorCode = VimbaDLL.featureIntGet(self._handle,\n self._name,\n byref(valueToGet))\n if errorCode != 0:\n raise VimbaException(errorCode)\n\n return valueToGet.value",
"def vote(self):\n if self.vote_exists():\n return self.update_vote()\n return self.create_vote()",
"def get_score(self, int_img):\n score = 0\n if self.type == FeatureType.TWO_VERTICAL:\n first = ii.sum_region(int_img, self.top_left, (self.top_left[0] + self.width, int(self.top_left[1] + self.height / 2)))\n second = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + self.height / 2)), self.bottom_right)\n score = first - second\n elif self.type == FeatureType.TWO_HORIZONTAL:\n first = ii.sum_region(int_img, self.top_left, (int(self.top_left[0] + self.width / 2), self.top_left[1] + self.height))\n second = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 2), self.top_left[1]), self.bottom_right)\n score = first - second\n elif self.type == FeatureType.THREE_HORIZONTAL:\n first = ii.sum_region(int_img, self.top_left, (int(self.top_left[0] + self.width / 3), self.top_left[1] + self.height))\n second = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 3), self.top_left[1]), (int(self.top_left[0] + 2 * self.width / 3), self.top_left[1] + self.height))\n third = ii.sum_region(int_img, (int(self.top_left[0] + 2 * self.width / 3), self.top_left[1]), self.bottom_right)\n score = first - second + third\n elif self.type == FeatureType.THREE_VERTICAL:\n first = ii.sum_region(int_img, self.top_left, (self.bottom_right[0], int(self.top_left[1] + self.height / 3)))\n second = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + self.height / 3)), (self.bottom_right[0], int(self.top_left[1] + 2 * self.height / 3)))\n third = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + 2 * self.height / 3)), self.bottom_right)\n score = first - second + third\n elif self.type == FeatureType.FOUR:\n # top left area\n first = ii.sum_region(int_img, self.top_left, (int(self.top_left[0] + self.width / 2), int(self.top_left[1] + self.height / 2)))\n # top right area\n second = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 2), self.top_left[1]), (self.bottom_right[0], int(self.top_left[1] + self.height / 2)))\n # bottom left area\n third = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + self.height / 2)), (int(self.top_left[0] + self.width / 2), self.bottom_right[1]))\n # bottom right area\n fourth = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 2), int(self.top_left[1] + self.height / 2)), self.bottom_right)\n score = first - second - third + fourth\n return score",
"def get_feature_from_image(self, img_path):\n\n if not tf.gfile.Exists(img_path):\n tf.logging.fatal('File does not exist %s', img_path)\n image_data = tf.gfile.FastGFile(img_path, 'rb').read()\n\n prediction = self.sess.run(self.softmax_tensor, {'DecodeJpeg/contents:0': image_data})\n prediction = np.squeeze(prediction)\n return prediction",
"def get_feature_from_image(img_path):\n global sess, softmax_tensor\n\n if not tf.gfile.Exists(img_path):\n tf.logging.fatal('File does not exist %s', img_path)\n image_data = tf.gfile.FastGFile(img_path, 'rb').read()\n\n predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n return predictions",
"def get_vote(self, id: int) -> dict:",
"def get_integral(self, key):\n tag = self.get_tag(key)\n if isinstance(tag, (BDTByte, BDTShort, BDTInt, BDTLong)):\n return tag.val\n else:\n raise KeyError(\"No integral value for key %s found in BDTCompound @%#x\" % (key, id(self)))",
"def get_img_feature(self, image_id):\n self.check_img_feature_file()\n self.check_img_feature_offset_map()\n\n if image_id in self.img_feat_offset_map:\n img_offset = self.img_feat_offset_map[image_id]\n self.img_feature_file.seek(img_offset, 0)\n arr = [s.strip() for s in self.img_feature_file.readline().split('\\t')]\n num_boxes = int(arr[1])\n feat = np.frombuffer(base64.b64decode(arr[2]), dtype=np.float32).reshape((-1, self.args.img_feature_dim))\n return feat\n\n return None",
"def get_feature(model, img_tensor, feature_id, device):\n mean = torch.Tensor([0.485, 0.456, 0.406]).to(device).view(1, config.channels, 1, 1)\n std = torch.Tensor([0.229, 0.224, 0.225]).to(device).view(1, config.channels, 1, 1)\n img_normalized = (img_tensor - mean) / std\n feature = model(img_normalized, feature_id)\n return feature",
"def image(request, img_id):\n image = Image.objects.get(pk=img_id)\n if request.user.is_staff or image.is_approved:\n comments = ImageComment.objects.filter(image_id=img_id).order_by('-submission_date')\n comments_and_votes = Vote.objects.get_weighted_scores_in_bulk(comments, request.user)\n\n ctx = {\"img\":image,\n \"comments_and_votes\":comments_and_votes,\n \"image_tags\":image.tags.all(),\n \"all_tags\":Tag.objects.all(),\n \"site\":get_current_site(request)\n }\n return render_to_response('wainz/image.html', ctx , context_instance = RequestContext(request))\n else:\n return HttpResponseRedirect(reverse('wainz.views.composite'))",
"def upvote_feature(request, pk):\n if request.method == \"POST\":\n feature = get_object_or_404(Feature, pk=pk)\n feature.upvotes += 1\n feature.save()\n return redirect('get_feature')",
"def extract_feature_image(img, feature_type, feature_coord=None):\n ii = integral_image(img)\n return haar_like_feature(ii, 0, 0, ii.shape[0], ii.shape[1],\n feature_type=feature_type,\n feature_coord=feature_coord)",
"def extract_feature_image(img, feature_type, feature_coord=None):\n ii = integral_image(img)\n return haar_like_feature(ii, 0, 0, ii.shape[0], ii.shape[1],\n feature_type=feature_type,\n feature_coord=feature_coord)",
"def get_classification(self, image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (360,270), interpolation = cv2.INTER_CUBIC)\n\n with self.graph.as_default():\n result = self.model.predict(image[None, :, :, :], batch_size=1).squeeze()\n id = np.argmax(result)\n return id, result[id]",
"def integral(img, sqSum = False, tilted = False):\n\tif sqSum is False and tilted is False:\n\t\treturn cv2.integral(img)\n\telif sqSum is True and tilted is False:\n\t\treturn cv2.integral2(img)\n\telif sqSum is True and tilted is True:\n\t\treturn cv2.integral3(img)\n\telif sqSum is False and tilted is True:\n\t\tsu, sqsu, tilt = cv2.integral3(img)\n\t\treturn su, tilt\n\telse:\n\t\treturn cv2.integral(img)",
"def vote(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'votes')\r\n request = http.Request('POST', url, {'to': '1'})\r\n\r\n return request, parsers.parse_json",
"def getAction(self,img):\n \n action = self.learner.getAction(img)\n\n return action",
"def _UHVI_indicator(self, kernel):\n return self._UHVI_indicator_archive(kernel).hypervolume_improvement",
"def recognition_system__get_image_feature(args):\n img_idx, img_path = args\n \n util.dbg_print('\\t Extracting Feature from Image {}'.format(img_idx))\n \n opts = recognition_system__worker_cache['opts']\n dictionary = recognition_system__worker_cache['dictionary']\n \n return get_image_feature(opts, img_path, dictionary)",
"def identify_image(im):\n score_cures = np.mean(im[1025:1065, 1130:1180, 0])\n score_ingredients = np.mean(im[1025:1065, 675:720, 0])\n if score_cures < 177.5:\n return 'cures'\n if score_ingredients < 177.5:\n return 'ingredients'\n else:\n return 'other'",
"def predict(self, img):\n return self._predict([img])[0]",
"def extract_feat(self, img):\n x = self.backbone(img)\n y = self.backbone_gan(img)\n if self.with_feature_selection:\n x, y = self.feature_selection(x, y)\n if self.with_neck:\n x = self.neck(x)\n return x, y",
"def get_classification(self, image):\n if self.model is not None:\n im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n im = im.astype('float32')\n im = preprocess_input(im)\n im_array = np.asarray(im)\n transformed_im_array = im_array[None, :, :, :]\n with self.graph.as_default():\n preds = self.model.predict(transformed_im_array, batch_size=1)\n return np.argmax(preds[0])\n return TrafficLight.UNKNOWN",
"def cmd_gallery_tag_vote(client, args):\n gallery_tag_vote = client.gallery_tag_vote(args.item_id, args.tag, args.vote)\n generate_output({'gallery_tag_vote': gallery_tag_vote})",
"def __getitem__(self, index):\n img = Image.open(os.path.join(self.img_path, self.imgs[index][0]))\n label = float(self.imgs[index][1]) if self._fine_tune or self._test else int(float(self.imgs[index][1])) - 1\n return self.preproc(img), torch.tensor(label)",
"def vote(self, agents):\n\n # If the impostors have a set target, vote that\n if self.target != -1:\n vote = self.target\n else: # Vote a random living agents\n vote = random.sample([a.agent_id for a in agents if not a.agent_id == self.agent_id and a.alive and not a.is_impostor()], 1)[0]\n\n self.target = -1\n self.logger.log(f\"Impostor {self.agent_id} votes for {vote}\", Logger.LOG | Logger.PRINT_VISUAL)\n return vote",
"def get(self, image_id: str) -> typing.Dict:\n return self.annotation.get(image_id)",
"def value(self):\n return self._image._A if self._image else None",
"def t(p, vote_count):\n return vote_count[p]",
"def process(self):\n\n if self.__user.get_test_image() is None:\n return None\n else:\n # Initializing and assigning the variable 'result' to True\n result = None\n\n # Loading the features into a list and assigning it as 'x'\n x = [[self.__user.get_gender(), self.__user.get_handedness(), self.__user.get_age(),\n self.__user.get_test_image().get_rms(), self.__user.get_test_image().get_max_ht(),\n self.__user.get_test_image().get_min_ht(), self.__user.get_test_image().get_std_deviation_st_ht(),\n self.__user.get_test_image().get_mrt(), self.__user.get_test_image().get_max_ht(),\n self.__user.get_test_image().get_min_ht(), self.__user.get_test_image().get_std_ht(),\n self.__user.get_test_image().get_changes_from_negative_to_positive_between_st_ht()]]\n\n # Opening the voting classifier pickle file and storing the model in the variable 'ensemble_classifier'\n with open('models/VotingClassifier.pickle', 'rb') as file:\n ensemble_classifier = pickle.load(file)\n\n # Predicting the result using the loaded features of the user\n y_pred = ensemble_classifier.predict(x)\n\n # If the predicted result returns '2', then assign result as True\n if y_pred == 2:\n result = True\n\n # If the predicted result returns '1', then assign result as False\n elif y_pred == 1:\n result = False\n\n return result"
]
| [
"0.57404464",
"0.57267016",
"0.56636065",
"0.55443347",
"0.55070406",
"0.5464179",
"0.5460809",
"0.5402727",
"0.5386541",
"0.53574616",
"0.5319061",
"0.5227346",
"0.5227346",
"0.5212558",
"0.5194332",
"0.5168318",
"0.50867814",
"0.508197",
"0.5074384",
"0.5070799",
"0.5053127",
"0.5038056",
"0.5017391",
"0.5012716",
"0.50105226",
"0.49784434",
"0.49476486",
"0.4940159",
"0.49243852",
"0.4912889"
]
| 0.7510949 | 0 |
Builds an initial dictionary, just with directory name and its size. Returns a dictionary with list as a value. | def builddictionary(dirlist):
init_dictionary={}
for string in dirlist:
splitstring=string.split("\t")
if len(splitstring) == 2:
init_dictionary[splitstring[1].strip("\n")] = [int(splitstring[0]), 0]
return init_dictionary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_stat_dic(stat_id, data_directories):\n station_dic = {}\n total_dim = []\n for d,i in data_directories.items():\n files = os.listdir(i)\n for f in files:\n Id = f.split('_'+d)[0]\n if Id == stat_id:\n if d not in station_dic.keys():\n station_dic[d] = [] \n station_dic[d].append(i + '/' + f)\n \n total_dim. append( os.path.getsize (i + '/' + f) )\n \n #print('FOUND!' , d , ' ' , f )\n \n size = sum(total_dim) \n return station_dic, size",
"def make_file_dict():\r\n fileDict = {'pageUrls': [],\r\n 'pageFileNames': [],\r\n 'pageIds': [],\r\n 'fileUrls': [],\r\n 'fileIds': [],\r\n 'fileNames': [],\r\n 'cssUrls': [],\r\n 'cssFileNames': [],\r\n 'imgUrls': [],\r\n 'imgFileNames': []}\r\n return fileDict",
"def directory_to_json(self, path, list_in):\n directory_json = {\"base_path\": path, \"files\": list_in}\n return directory_json",
"def buildDict(self, dict):\n for item in dict:\n length = len(item)\n if length not in self.dic:\n self.dic[length] = [item]\n else:\n self.dic[length].append(item)",
"def parse(readDataInstance):\n if len(readDataInstance) == consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 8:\n newDataDirectory = DataDirectory()\n for i in range(consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES):\n newDataDirectory[i].name.value = dirs[i]\n newDataDirectory[i].rva.value = readDataInstance.readDword()\n newDataDirectory[i].size.value = readDataInstance.readDword()\n else:\n raise excep.DirectoryEntriesLengthException(\"The IMAGE_NUMBEROF_DIRECTORY_ENTRIES does not match with the length of the passed argument.\")\n return newDataDirectory",
"def collect(dname='.'):\n files = {}\n\n for paths in os.walk(dname):\n for fname in paths[2]:\n flen = len(fname)\n fpath = os.path.join(paths[0], fname)\n try:\n files[flen].append(fpath)\n except KeyError:\n files[flen] = [fpath]\n\n return files",
"def filelist_create(self, directory=\".\"):\n # Get data from ls -lh and parse it correctly\n files = listdir_wrapper(directory, self.show_hidden)\n self.filesize = {}\n for fil in files:\n # Number of images in directory as filesize\n if os.path.isdir(fil):\n try:\n subfiles = listdir_wrapper(fil, self.show_hidden)\n # Necessary to keep acceptable speed in library\n many = False\n if len(subfiles) > self.file_check_amount:\n many = True\n subfiles = [subfile\n for subfile in subfiles[:self.file_check_amount]\n if is_image(os.path.join(fil, subfile))]\n amount = str(len(subfiles))\n if subfiles and many:\n amount += \"+\"\n self.filesize[fil] = amount\n except:\n self.filesize[fil] = \"N/A\"\n else:\n self.filesize[fil] = sizeof_fmt(os.path.getsize(fil))\n\n return files",
"def create_file_dict():\n import os\n file_dict = {}\n for root, dirs, files in os.walk('.'):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n for f in files:\n try:\n with open(f, 'r') as thing:\n res = thing.readline()\n except:\n res = ''\n file_name = os.path.join(root, f).lstrip('./')\n file_dict[file_name] = res\n return file_dict",
"def get_dir_size(dir_path):\n result = {}\n for root, dirs, files in os.walk(dir_path):\n for directory in dirs:\n result.update({directory: get_dir_size_helper(join(root, directory))})\n result.update({filename: getsize(join(root, filename)) for filename in files})\n break\n return result",
"def get_directory(self, directory: str) -> List[Dict]:\n raise NotImplementedError",
"def buildDict(self, words):\n for word in words:\n length = len(word)\n key = \"{}/{}\".format(length, word[0])\n ls = self.origin.get(key, [])\n ls.append(word)\n self.origin[key] = ls",
"def __init__(self, shouldPack = True):\n self.shouldPack = shouldPack\n \n for i in range(consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES):\n dir = Directory()\n dir.name.value = dirs[i]\n self.append(dir)",
"def dirsize_get(l_filesWithoutPath, **kwargs):\n\n str_path = \"\"\n for k,v in kwargs.items():\n if k == 'path': str_path = v\n\n d_ret = {}\n l_size = []\n size = 0\n for f in l_filesWithoutPath:\n str_f = '%s/%s' % (str_path, f)\n if not os.path.islink(str_f):\n try:\n size += os.path.getsize(str_f)\n except:\n pass\n str_size = pftree.sizeof_fmt(size)\n\n return {\n 'status': True,\n 'diskUsage_raw': size,\n 'diskUsage_human': str_size\n }",
"def dir_creation():\n for i in range(dir_count):\n dir_name = str(i * nums)\n max_len = len(str(nums * dir_count))\n # if the len is equal, add stright forward\n if len(dir_name) == max_len:\n container.append(dir_name)\n while len(dir_name) < max_len:\n # tambahkan angka 0 sampai len(dir_name) == max_len\n dir_name = \"0\" + dir_name\n if len(dir_name) == max_len:\n container.append(dir_name)\n break\n return container",
"def get_directory_structure(rootdir):\n rootdir = rootdir.rstrip(os.sep)\n start = rootdir.rfind(os.sep) + 1\n dir= {\"containers\": [rootdir]} \n for path, dirs, files in os.walk(rootdir):\n folders = path[start:].split(os.sep)\n\n subdir = dict.fromkeys(files)\n parent = functools.reduce(dict.get, folders[:-1], dir)\n \n config = get_container_config(path, folders, subdir)\n \n parent[folders[-1]] = {'containers': dirs}\n parent[folders[-1]].update(config)\n \n return dir",
"def getFolderStructure(self, path):\n directoryList = {}\n for root, dirs, files in os.walk(path):\n folderList = root.split(os.sep)\n\n folders = directoryList\n\n for eachFolder in folderList:\n folders = folders.setdefault(eachFolder, {})\n return directoryList",
"def list_files_in_directory(self):\n lesson_file_dict = dict()\n lesson_file_dict[\"files\"] = []\n\n directory_list = listdir(self.sub_dir)\n for directory in directory_list:\n if isfile(join(self.sub_dir, directory)):\n lesson_file_dict[\"files\"].append(directory)\n\n return lesson_file_dict",
"def _build_memorymap(self):\n\t\tmemorymap = {}\n\t\ttotalsize = 0\n\t\tbaserva = self.liststream64.DirectoryData.BaseRva\n\t\tmmdscrptr64 = self.liststream64.DirectoryData.MINIDUMP_MEMORY_DESCRIPTOR64\n\t\tnumberofmemoryranges = self.liststream64.DirectoryData.NumberOfMemoryRanges\n\t\tfor i in range(numberofmemoryranges):\n\t\t\tmemorymap[mmdscrptr64[i].StartOfMemoryRange] = ((baserva + totalsize),mmdscrptr64[i].DataSize)\n\t\t\ttotalsize += mmdscrptr64[i].DataSize\n\t\treturn memorymap",
"def CountsByDirname(dict_of_list):\n r = {}\n for path in dict_of_list:\n dirname = os.path.dirname(path)\n r.setdefault(dirname, 0)\n r[dirname] += 1\n return r",
"def _create_directory_entries(self, key, config):\n # Initialize key variables\n updated = False\n dir_dict = {\n 'log_directory': 'log',\n 'ingest_cache_directory': 'cache',\n }\n directory = general.root_directory()\n\n # Setup the key value to a known good default\n if key in config['main']:\n # Verify whether key value is empty\n if config['main'][key] is not None:\n # Create\n if os.path.isdir(config['main'][key]) is False:\n config['main'][key] = ('%s/%s') % (\n directory, dir_dict[key])\n updated = True\n else:\n config['main'][key] = ('%s/%s') % (directory, dir_dict[key])\n updated = True\n else:\n config['main'][key] = ('%s/%s') % (directory, dir_dict[key])\n updated = True\n\n # Return\n return (updated, config)",
"def __create_dir_structure_file__(self):\n # | - __create_dir_structure_file__\n\n dir_structure_data = {}\n dir_structure_data[\"tree_level_labels\"] = self.tree_level_labels\n dir_structure_data[\"level_entries_dict\"] = self.level_entries_list\n # TEMP\n dir_structure_data[\"skip_dirs\"] = self.skip_dirs_lst\n\n fle_name = os.path.join(\n self.root_dir,\n self.working_dir,\n \"jobs_bin/dir_structure.json\",\n )\n\n with open(fle_name, \"w\") as fle:\n json.dump(dir_structure_data, fle, indent=2)\n # __|",
"def get_dir(root_dir):\n\n dir_dict = {}\n\n for item in os.scandir(root_dir):\n item_type = \"\"\n\n if item.is_file():\n item_type = \"[FILE]\"\n elif item.is_dir():\n item_type = \"[DIR]\"\n\n dir_dict[item.name] = item_type\n\n return dir_dict",
"def filelist(folder):\n file_dict={}\n folderlist = glob.glob(os.getcwd()+\"/\"+folder+\"/*\")\n for i in tqdm(folderlist):\n filelist = glob.glob(i+\"/*\")\n filename = i.rsplit(\"/\")[-1]\n file_dict[filename]= filelist\n\n return file_dict",
"def __init__(self, dirname, defmode='r'):\n self.name = dirname\n self.defmode = defmode\n\n self.items = []\n\n for i in os.listdir(dirname):\n if os.path.isdir(os.path.join(dirname, i)):\n self.items.append(Tree(os.path.join(dirname, i), defmode))\n\n else:\n self.items.append(open(os.path.join(dirname, i), defmode))\n\n self._dict = self.to_dict()",
"def create_directory(self, directory: str) -> Dict:\n raise NotImplementedError",
"def build_filelist(input_dir: str, syst: bool = False) -> dict:\n filedict = {\n idir.split('SYST_')[-1].split('/')[0]: {}\n for idir in glob('{}/*'.format(input_dir)) if 'SYST_' in idir\n }\n\n filedict['nominal'] = build_groupings(f'{input_dir}/NOMINAL')\n if syst:\n for idir in filedict.keys():\n if idir == 'nominal':\n continue\n elif 'Rivet' in idir:\n continue\n filedict[idir] = build_groupings(f'{input_dir}/SYST_{idir}')\n else:\n filedict = {'nominal': filedict['nominal']}\n\n pprint(filedict, width=150)\n return filedict",
"def survey_library(folders):\n res = {}\n cnt = 0\n for folder in folders:\n for dpath, dirs, fnames in os.walk(folder):\n for base_fname in fnames:\n cnt += 1\n base_fpath = os.path.join(dpath, base_fname).replace('\\\\', '/')\n try:\n flen = os.stat(base_fpath).st_size\n fpaths = res.setdefault(flen, [])\n fpaths.append(base_fpath)\n except (FileNotFoundError, IOError):\n # If it won't stat, don't consider it. But report...\n print(\"Error stat-ing:\", base_fpath)\n pass\n\n # just out of curiosity, how good it my key choice?\n # It would be perfect if no length has more than one file.\n dups = 0\n for fpaths in res.values():\n if len(fpaths) > 1:\n dups += 1\n print(\"nFiles:\", cnt)\n print(\"nLengths:\", len(res))\n print(\"Lengths with more than 1 file:\", dups)\n\n return res",
"def size(path: str) -> dict:\n file_sizes = {}\n for root, dirs, files in os.walk(os.path.normpath(f'./{path}/'), topdown=False):\n size = sum([os.path.getsize(os.path.join(root, f)) for f in files])\n file_sizes[root] = size\n\n for dir_ in dirs:\n path = os.path.join(root, dir_)\n if path in file_sizes:\n file_sizes[root] += file_sizes[path]\n\n # Convert all sizes in bytes to bytes, MB, GB, etc.\n for path, size in file_sizes.items():\n file_sizes[path] = convert(size)\n \n return file_sizes",
"def _build_dictionary(self):\n print(\"Building Dictionary...\")\n self.dictionary = Dictionary(self.load_files())",
"def cleanup(dict):\n from itertools import groupby\n from operator import itemgetter\n tuplelist = []\n for dirname, data in groupby(sorted(dict.items(),key=itemgetter(1)),key=itemgetter(1)):\n data = list(data)\n mx = max(data,key=lambda x:len(x[0]))\n tuplelist += [x for x in data if len(x[0]) == len(mx[0])]\n tuplelist.sort()\n dict = {}\n for dirname, data in tuplelist:\n #print(dirname, data)\n dict[dirname] = data\n return dict"
]
| [
"0.64317083",
"0.6129734",
"0.59935033",
"0.5906379",
"0.5903641",
"0.58926314",
"0.5888912",
"0.5844981",
"0.58379143",
"0.5773314",
"0.5679448",
"0.566208",
"0.56548357",
"0.56207246",
"0.5608621",
"0.5608046",
"0.5601262",
"0.55769926",
"0.555732",
"0.5555636",
"0.55360043",
"0.5520774",
"0.54957175",
"0.54885894",
"0.54842013",
"0.5480415",
"0.54288346",
"0.5425451",
"0.5421504",
"0.53888214"
]
| 0.62564236 | 1 |
Cleans a dictionary up, removing parent directories with duplicate data for them, leaving only directories that changed. | def cleanup(dict):
from itertools import groupby
from operator import itemgetter
tuplelist = []
for dirname, data in groupby(sorted(dict.items(),key=itemgetter(1)),key=itemgetter(1)):
data = list(data)
mx = max(data,key=lambda x:len(x[0]))
tuplelist += [x for x in data if len(x[0]) == len(mx[0])]
tuplelist.sort()
dict = {}
for dirname, data in tuplelist:
#print(dirname, data)
dict[dirname] = data
return dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recursive_dictionary_clear(d):\n for key in list(d):\n if isinstance(d[key], dict):\n recursive_dictionary_clear(d[key])\n\n if d[key] == \"\" or d[key] == {}:\n del d[key]",
"def data_clean_up(dict_of_dfs, outdir):\n # rvs_to_be_shared = id_mapping['SUBJECT_NUMBER'].tolist()\n for key, df in dict_of_dfs.items():\n for ind, subjnum in df['participant_id'].items():\n if subjnum in rvs_to_be_shared:\n onid = openneuro_id_lookup(subjnum)\n if onid:\n dict_of_dfs[key].at[ind, 'participant_id'] = '-'.join(['sub', onid])\n else:\n dict_of_dfs[key] = dict_of_dfs[key].drop(index=ind, axis=0)\n dict_of_dfs[key] = reorder_cols(dict_of_dfs[key])\n dict_of_dfs[key] = remove_blank_rows(dict_of_dfs[key])\n dict_of_dfs = remove_nans(dict_of_dfs)\n return dict_of_dfs",
"def keep_entry(dict_input, parent_key, child_keys):\n\n dict_output = dict()\n\n child_keys = [''.join((parent_key, '_', child_key)) for child_key in child_keys]\n\n for key, value in dict_input.items():\n if key.startswith(parent_key) and key not in child_keys:\n pass\n else:\n dict_output.update({key: value})\n\n return dict_output",
"def clean_map(params):\n if tf.gfile.IsDirectory(params.vocab_path):\n tf.gfile.DeleteRecursively(params.vocab_path)\n\n if tf.gfile.IsDirectory(params.map_path):\n tf.gfile.DeleteRecursively(params.map_path)\n\n if tf.gfile.IsDirectory(params.best_ckpt_path):\n tf.gfile.DeleteRecursively(params.best_ckpt_path)",
"def adddifftodictionary(dict):\n for directory in dict:\n if len(dict[directory]) == 2:\n dict[directory].append(dict[directory][1] - dict[directory][0])\n return dict",
"def remove_empty ( self ):\n with self._lock:\n for key in tuple ( self._subdirs.keys() ):\n if self._subdirs [key].check_empty():\n del self._subdirs [key]",
"def repair_path(dict_1):\n dup_dict = dict(dict_1)\n for k,v in dup_dict.items():\n if '\\\\' in k:\n key = k.replace('\\\\', '/')\n val = v.replace('\\\\', '/')\n del dict_1[k]\n dict_1[key] = val\n return dict_1",
"def remove_duplicates(self, tree={}):\n if not tree: tree=self.dict\n childrens = tree['childrens']\n for child in childrens:\n self.remove_duplicates(tree=child)\n key = f'{child[\"app_name\"]}_{child[\"model_name\"]}_{child[\"source_pk\"]}'\n if key in self.depth_reference:\n if child['depth'] < self.depth_reference[key]:\n child['save'] = False",
"def prune(self): # HashMap.prune\n for hashval, list in self.contentHash.iteritems():\n newlist=[]\n for entry in list:\n if not entry.deleted:\n newlist.append(entry)\n self.contentHash[hashval]=newlist",
"def cleanup(child):\n children = child.get('children', [])\n for childchild in children:\n cleanup(childchild)\n cleaned = {u'title': child['Title'], u'name': child['id'],\n u'children': children}\n child.clear()\n child.update(cleaned)",
"def dict_cleanup(self, data):\n for data_key, data_value in list(data.items()):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.required.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] != filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n del data[data_key]\n break\n if filter_value not in data_value[filter_key]:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] not in filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n del data[data_key]\n break\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if not found_match:\n del data[data_key]\n break\n else:\n self.logger.warning(msg=\"Dict_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"Dict_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n for data_key, data_value in list(data.items()):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.excluded.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] == filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n if filter_value in data_value[filter_key]:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] in filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if found_match:\n del data[data_key]\n break\n else:\n self.logger.warning(msg=\"Dict_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"Dict_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n return data",
"def cleanup_dict_infos(self, list_del_sha1s):\n for sha1 in list_del_sha1s:\n try:\n del self.dict_sha1_infos[str(sha1)]\n except:\n # could happen when cleaning up duplicates or image processed by another process\n pass",
"def reconcile_dicts(host: Component, old: Dict, new: Dict) -> Dict:\n reconciled_dict = {}\n for key, new_child in new.items():\n reconciled = reconcile(host, key, None, old.pop(key, None), new_child)\n if reconciled is not None:\n reconciled_dict[key] = reconciled\n for key, old_child in old.items():\n _unmounted(host, key, old_child)\n return reconciled_dict",
"def remove_dup(files1, dict_3, files2):\n l1 = files1[:]\n for i in l1:\n if '/' not in i:\n if i in files2:\n files1.remove(i)\n del dict_3[i]\n return files1",
"def find_duplicates(directories):\n md = sha1sums(directories)\n # prune multidict, only keep files that are duplicates\n # use list() to iterate first so dict doesnt change size while pop()ing\n for digest,paths in list(md.iteritems()):\n if len(paths) < 2:\n md.pop(digest)\n \n return md",
"def clean_dict(to_clean):\n for k in list(to_clean.keys()):\n if not to_clean.get(k):\n to_clean.pop(k)",
"def pclean(self):\n path_list_pruned = []\n for p in self.path_list:\n if not os.path.exists(p):\n print(\"Does not exist! \", p)\n elif p in path_list_pruned:\n print(\"Duplicate found \", p)\n else:\n p = os.path.normpath(p) # remove double slashes and stuff\n path_list_pruned.append(p)\n\n self.path_list = path_list_pruned\n self.pupdate()",
"def purge_cache():\n for (dir_path, dir_names, file_names) in os.walk(CACHE, topdown=False):\n for file_name in file_names:\n if is_json_file(file_name):\n path = os.path.join(dir_path, file_name)\n print(\"Removing file “%s”\" % path)\n os.remove(path)\n for directory in dir_names:\n path = os.path.join(dir_path, directory)\n if not os.listdir(path):\n print(\"Removing directory “%s”\" % path)\n os.rmdir(path)",
"def traverse_dict_and_add(self, rootDir, dictH):\n origRootDir = rootDir\n for key, item in dictH.iteritems():\n if item is None or item == {} or item == []:\n attemptedJoin = os.path.normpath(os.path.join(rootDir, key))\n keyPath = None\n if not os.path.isabs(key) and (os.path.isdir(attemptedJoin) or\n os.path.isfile(attemptedJoin)):\n # copy the found file/folder to directory\n keyPath = attemptedJoin\n if os.path.isabs(key) and (os.path.isfile(key) or\n os.path.isdir(key)):\n # copy file/folder to the root location\n if not os.path.isdir(rootDir):\n paths.mkdir_p(rootDir)\n keyPath = paths.path_leaf(key)\n copyLoc = os.path.join(rootDir, keyPath)\n shutil.copy2(key, copyLoc)\n continue # skip the rest of this iteration\n\n if keyPath is not None and not os.path.isdir(keyPath):\n # the string was either not a file/folder or couldn't be\n # resolved from a relative path into a file/folder\n #\n copyLoc = paths.path_leaf(keyPath)\n copyLoc = os.path.join(rootDir, copyLoc)\n print copyLoc\n shutil.copy2(key, copyLoc)\n elif keyPath is None:\n # no directory exists at this location, create one\n dirToMake = os.path.normpath(os.path.join(rootDir, key))\n os.makedirs(dirToMake)\n # sys.exit('Got: \"{f}\", couldn\\'t resolve '\n # 'into file or folder'.format(f=key))\n\n elif isinstance(item, dict):\n newRootDir = os.path.join(rootDir, key)\n newRootDir = os.path.normpath(newRootDir)\n self.traverse_dict_and_add(rootDir=newRootDir,\n dictH=dictH[key])\n else:\n sys.exit('Got: \"{f}\", expected a dictionary, '\n '\\{\\} or None'.format(f=item))",
"def _clear_ancestor_caches(self):\r\n for page in Page.objects.get(id=self.id).get_ancestors():\r\n key = 'stoat:pages:%d:children' % (page.id)\r\n cache.delete(key)",
"def tree_copy_duplicate_removal(in_tree, out_tree, key, keys):\n for entry in in_tree:\n key_value = getattr(entry, key)\n if not key_value in keys:\n out_tree.Fill()\n keys.add(key_value)",
"def drop_keys(d):\n if isinstance(d, dict):\n return {\n k: drop_keys(v)\n for k, v in d.items()\n if k not in [\"propNames\", \"package\"]\n and v is not None\n and not (k == \"children\" and v == \"\")\n }\n elif isinstance(d, list):\n return [drop_keys(x) for x in d]\n return d",
"def resolve(self): # HashMap.resolve\n prevCount = self.allFiles.count_deleted()\n\n # no need to resolve uniques, so remove them from the HashMap\n deleteList=[]\n for hashval, list in self.contentHash.iteritems():\n if len(list) == 1:\n deleteList.append(hashval)\n for e in deleteList:\n del self.contentHash[e]\n\n # delete the directories first, in order of\n # increasing depth\n if verbose:\n print '# checking candidates from depth ' + str(self.minDepth) + ' through ' + str(self.maxDepth)\n for currentDepth in xrange(self.minDepth-1,self.maxDepth+1):\n for hashval, list in self.contentHash.iteritems():\n example = list[0]\n if isinstance(example, DirObj):\n winner, losers = resolve_candidates(list, currentDepth)\n if losers != None:\n for loser in losers:\n if not loser.deleted:\n if verbose:\n print '# dir \"' + loser.pathname + '\" covered by \"' + winner.pathname + '\"'\n self.delete(loser)\n loser.winner = winner\n self.prune()\n\n for hashval, list in self.contentHash.iteritems():\n example = list[0] \n if isinstance(example, FileObj):\n winner, losers = resolve_candidates(list)\n for loser in losers:\n if not loser.deleted:\n if verbose:\n print '# file \"' + loser.pathname + '\" covered by \"' + winner.pathname + '\"'\n self.delete(loser)\n loser.winner = winner\n\n return self.allFiles.count_deleted() - prevCount",
"def clean(self):\n filtered_items = {}\n for name, ls in self.items.items():\n filtered_ls = []\n for i in ls:\n if i.alive():\n filtered_ls.append(i)\n else:\n self.del_item(i)\n filtered_items[name] = filtered_ls\n self.items = filtered_items",
"def _mv_to_root(map):\n if METADATA_KEY in map:\n for mk in list(map[METADATA_KEY].keys()):\n if mk not in map:\n map[mk] = map[METADATA_KEY][mk]\n del map[METADATA_KEY][mk]\n _LOGGER.debug(\"Section {m}.{k} moved to {k}\".\n format(m=METADATA_KEY, k=mk))\n del self[CONFIG_KEY][METADATA_KEY]",
"def clearMap(self):\n for key in self.componentMap.keys():\n del self.componentMap[key][:]",
"def fix_from(root, infos):\n # Assume root has correct parents (or none)\n old_hash = root['old_hash']\n new_hash = sha1(info2str(root).encode('latin1')).hexdigest()\n for info in infos:\n if not 'parents' in info:\n continue\n if old_hash in info['parents']:\n index = info['parents'].index(old_hash)\n info['parents'][index] = new_hash\n info['fixed_parents'][index] = True",
"def purge_deleted_directories(self):\n registered = {safe_filename(obj.name) for obj in self}\n bad_directories = [\n self._base_data_dir / dirname\n for dirname in os.listdir(self._base_data_dir)\n if (self._base_data_dir / dirname).is_dir() and dirname not in registered\n ]\n\n for fp in bad_directories:\n shutil.rmtree(fp)\n\n return len(bad_directories)",
"def clear():\n global d\n for key in d.keys():\n del d[key]",
"def _clean_paths(paths):\n\n\tclean_paths = {key: np.concatenate([path[key] for path in paths]) for key in paths[0].keys()}\n\n\treturn clean_paths"
]
| [
"0.63620996",
"0.60965484",
"0.5918594",
"0.59049326",
"0.5871248",
"0.58565223",
"0.5832315",
"0.579622",
"0.57943666",
"0.57544166",
"0.57085323",
"0.5701595",
"0.5678628",
"0.56445706",
"0.5629633",
"0.56238925",
"0.5622825",
"0.5610778",
"0.5609478",
"0.5595599",
"0.5588845",
"0.5573341",
"0.5571819",
"0.5556879",
"0.5531713",
"0.5515912",
"0.5512303",
"0.54997134",
"0.5486234",
"0.5478263"
]
| 0.70737034 | 0 |
Prune features whose correlation score is above a given threshold. | def prune_corr_features(X_feat, threshold):
    feats = list(set(feat for f_set in X_feat for feat in f_set))
    num_before = len(feats)
    step = 2000
    for k in range(5):
        to_keep = set()
        random.shuffle(feats)
        for i in range(0, len(feats), step):
            size = min(step, len(feats) - i)
            x = np.zeros((len(X_feat), size))
            sub_feats = sorted(
                feats[i : i + size], key=lambda f: 30 * f.count(FEAT_JOINER) - len(f)
            )
            for j, x_f in enumerate(sub_feats):
                idx = [x_f in f_set for f_set in X_feat]
                x[idx, j] = 1
            corr, _ = spearmanr(x)
            corr = np.triu(corr, k=1)
            corr = np.any(np.abs(corr) > threshold, axis=0)
            to_keep.update(feat for n, feat in enumerate(sub_feats) if not corr[n])
            log.debug(f"At {i:4d}: eliminated {sum(corr):3d} features")
        feats = list(to_keep)
        log.debug(f"Iteration {k+1}: kept {len(feats)} after pruning")
    # keep only the surviving features in each feature set before returning
    kept = set(feats)
    X_pruned = [[f for f in f_set if f in kept] for f_set in X_feat]
    return X_pruned, num_before - len(feats) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_correlated_features(x, threshold=0.9):\n x_copy = np.copy(x)\n \n corr_matrix = np.corrcoef(x_copy, rowvar=False)\n # Set to False highly correlated columns\n nb_col = len(corr_matrix)\n columns = np.full((nb_col,), True, dtype=bool)\n for i in range(nb_col):\n for j in range(i+1, nb_col):\n if corr_matrix[i, j] >= threshold:\n if columns[i]:\n columns[j] = False\n \n # Remove correlated features and concat categorical features\n return x_copy[:, columns], columns",
"def prune(self, threshold=0, with_multiplicity=False):\n coefs = self.eci if with_multiplicity else self.coefs\n bit_ids = [i for i, coef in enumerate(coefs) if abs(coef) < threshold]\n self.cluster_subspace.remove_corr_functions(bit_ids)\n\n # Update necessary attributes\n ids_complement = list(set(range(len(self.coefs))) - set(bit_ids))\n ids_complement.sort()\n self.coefs = self.coefs[ids_complement]\n\n if self._feat_matrix is not None:\n self._feat_matrix = self._feat_matrix[:, ids_complement]\n\n if hasattr(self, \"eci\"): # reset cache\n del self.eci\n\n if hasattr(self, \"cluster_interaction_tensors\"): # reset cache\n del self.cluster_interaction_tensors\n\n # reset the evaluator\n self._set_evaluator_data(set_orbits=True)",
"def remove_redundants(tr_x, tr_y, threshold = 0.95): \n corrm = np.corrcoef(np.hstack([tr_x,tr_y.reshape((-1,1))]).T)\n rows, cols = np.where((corrm>threshold) & (corrm<1.0))\n idx = [c for r,c in zip(rows,cols) if (c>r)]\n tr_x_removed = np.delete(tr_x, idx, axis=1)\n return tr_x_removed, idx",
"def threshold_col_del(self, threshold):\n self.data = self.data.dropna(thresh=threshold*len(self.data), axis=1) \n self.X = self.data.drop(self.target, axis =1)\n self.y = self.data[self.target]",
"def remove_highly_correlated_vars_fast(df, corr_limit=0.70):\r\n # Creating correlation matrix\r\n correlation_dataframe = df.corr().abs().astype(np.float16)\r\n # Selecting upper triangle of correlation matrix\r\n upper_tri = correlation_dataframe.where(np.triu(np.ones(correlation_dataframe.shape),\r\n k=1).astype(np.bool))\r\n # Finding index of feature columns with correlation greater than 0.95\r\n to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > corr_limit)]\r\n print();\r\n print('Highly correlated columns to remove: %s' %to_drop)\r\n return to_drop",
"def filter_collinearity(c, threshold):\n\t# ensure symmetric\n\tif c.shape[0] != c.shape[1]:\n\t\traise ValueError('input dataframe should be symmetrical in dimensions')\n\n\t# init drops list\n\tdrops = []\n\tmacor = [] # mean abs corrs\n\tcorrz = [] # the correlations\n\n\t## Iterate over each feature\n\tfinished = False\n\twhile not finished:\n\n\t\t# Whenever there's a break, this loop will start over\n\t\tfor i,nm in enumerate(c.columns):\n\t\t\tthis_col = c[nm].drop(nm).sort_values(na_position='first') # gets the column, drops the index of itself, and sorts\n\t\t\tthis_col_nms = this_col.index.tolist()\n\t\t\tthis_col = np.array(this_col)\n\n\t\t\t# check if last value is over thresh\n\t\t\tmax_cor = this_col[-1]\n\t\t\tif pd.isnull(max_cor) or max_cor < threshold or this_col.shape[0] == 1:\n\t\t\t\tif i == c.columns.shape[0] - 1:\n\t\t\t\t\tfinished = True\n\n\t\t\t\t# control passes to next column name or end if finished\n\t\t\t\tcontinue\n\n\t\t\t# otherwise, we know the corr is over the threshold\n\t\t\t# gets the current col, and drops the same row, sorts asc and gets other col\n\t\t\tother_col_nm = this_col_nms[-1]\n\t\t\tthat_col = c[other_col_nm].drop(other_col_nm)\n\n\t\t\t# get the mean absolute correlations of each\n\t\t\tmn_1, mn_2 = np.nanmean(this_col), np.nanmean(that_col)\n\n\t\t\t# we might get nans?\n\t\t\t# if pd.isnull(mn_1) and pd.isnull(mn_2):\n\t\t\t\t# this condition is literally impossible, as it would\n\t\t\t\t# require every corr to be NaN, and it wouldn't have\n\t\t\t\t# even gotten here without hitting the continue block.\n\t\t\tif pd.isnull(mn_1):\n\t\t\t\tdrop_nm = other_col_nm\n\t\t\telif pd.isnull(mn_2):\n\t\t\t\tdrop_nm = nm\n\t\t\telse:\n\t\t\t\tdrop_nm = nm if mn_1 > mn_2 else other_col_nm\n\n\t\t\t# drop the bad col, row\n\t\t\tc.drop(drop_nm, axis=1, inplace=True)\n\t\t\tc.drop(drop_nm, axis=0, inplace=True)\n\n\t\t\t# add the bad col to drops\n\t\t\tdrops.append(drop_nm)\n\t\t\tmacor.append(np.maximum(mn_1, mn_2))\n\t\t\tcorrz.append(_MCFTuple(\n\t\t\t\t\tfeature_x=drop_nm,\n\t\t\t\t\tfeature_y=nm if not nm == drop_nm else other_col_nm,\n\t\t\t\t\tabs_corr=max_cor,\n\t\t\t\t\tmac=macor[-1]\n\t\t\t\t))\n\n\t\t\t# if we get here, we have to break so the loop will \n\t\t\t# start over from the first (non-popped) column\n\t\t\tbreak\n\n\t\t# if not finished, restarts loop, otherwise will exit loop\n\n\t# return\n\treturn drops, macor, corrz",
"def correlation_drop(df, threshold):\n df_copy = df.copy()\n col_corr = set()\n\n corr_matrix = df_copy.corr()\n\n for i in range(len(corr_matrix.columns)):\n for j in range(i):\n if (corr_matrix.iloc[i, j] >= threshold) and (corr_matrix.columns[j] not in col_corr):\n colname = corr_matrix.columns[i]\n col_corr.add(colname)\n if colname in df_copy.columns:\n del df_copy[colname]\n print(col_corr)\n return df_copy",
"def prune_features(self, verbose=False):\n # Collect all features and prune those occurring only once.\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)\n\n self.remove_features = []\n for k in features:\n if features[k] <= 2:\n self.remove_features.append(k)\n\n if verbose:\n print \"Number of unique features: \", len(self.remove_features)\n\n self.remove_features = set(self.remove_features)\n for k in self.utterance_features:\n self.utterance_features[k].prune(self.remove_features)\n\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)",
"def filter_patterns(self,threshold):\n if threshold is not None:\n pass #learn threshold\n return filter(lambda pattern: pattern.score > threshold, self.patterns)",
"def Clean(pmf):\n vals = [val for val in pmf.Values() if val < thresh]\n [pmf.Remove(val) for val in vals]",
"def remove_multicollinearity_correlation(data: pd.DataFrame, threshold: Optional[float] = 0.8) -> pd.DataFrame:\n corr_data = pd.DataFrame(np.triu(np.abs(data.corr())), columns=data.columns)\n\n multicoll_columns = np.logical_and(corr_data >= threshold, corr_data < 1.0).any()\n return data.loc[:, ~multicoll_columns]",
"def prune_dims(variances, threshold=0.005):\r\n scale_z = np.sqrt(variances)\r\n return scale_z >= threshold",
"def trimCompo(self, threshold):\n newCompo = {}\n for key,value in self.m_compo.items():\n if value > threshold:\n newCompo[ key ] = value\n self.m_compo = newCompo",
"def drop_corr_columns(df, drop_columns=True, print_columns=True, threshold=0.98):\n\n # 1. calculation\n CorrCoeff = df.corr()\n\n # 2. report\n CorrFieldsList = []\n print('Columns with correlations more than %s :' % str(threshold))\n for i in CorrCoeff:\n for j in CorrCoeff.index[CorrCoeff[i] >= threshold]:\n if i != j and j not in CorrFieldsList:\n CorrFieldsList.append(j)\n if print_columns:\n print(\"%s-->%s: r^2=%f\" % (i, j, CorrCoeff[i][CorrCoeff.index == j].values[0]))\n #print()\n #print('Correlated columns count: %', len(CorrFieldsList))\n\n # 3. dropping\n if drop_columns:\n print('%s columns total' % df.shape[1])\n df = df.drop(CorrFieldsList, 1)\n print('%s columns left' % df.shape[1])\n\n return df",
"def prune_features(self):\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]",
"def filter_out_rare_points(points, threshold_pct=0.5):\n \n c = Counter(points)\n total = sum(c.values())\n l = []\n for p in points:\n v = c[p]\n if v/total * 100 <= threshold_pct:\n l.append(np.nan)\n else:\n l.append(p)\n \n return l",
"def remove_rows(df, threshold, log=False):\n if log: section_timer = Timer(log=f\"removing rows with more than {threshold * 100}% of NaNs\")\n\n non_nan_values = int(df.shape[1] * (1 - threshold))\n df_clean = df.dropna(thresh=non_nan_values, axis=0)\n\n if log: section_timer.end_timer(log=f\"removed {df.shape[0] - df_clean.shape[0]} rows\")\n return df_clean",
"def feature_selection(feature_matrix, missing_threshold=90, correlation_threshold=0.95):\n \n feature_matrix = pd.get_dummies(feature_matrix)\n n_features_start = feature_matrix.shape[1]\n print('Original shape: ', feature_matrix.shape)\n\n # Find missing and percentage\n missing = pd.DataFrame(feature_matrix.isnull().sum())\n missing['percent'] = 100 * (missing[0] / feature_matrix.shape[0])\n missing.sort_values('percent', ascending = False, inplace = True)\n\n # Missing above threshold\n missing_cols = list(missing[missing['percent'] > missing_threshold].index)\n n_missing_cols = len(missing_cols)\n\n # Remove missing columns\n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in missing_cols]]\n print('{} missing columns with threshold: {}.'.format(n_missing_cols,\n missing_threshold))\n \n # Zero variance\n unique_counts = pd.DataFrame(feature_matrix.nunique()).sort_values(0, ascending = True)\n zero_variance_cols = list(unique_counts[unique_counts[0] == 1].index)\n n_zero_variance_cols = len(zero_variance_cols)\n\n # Remove zero variance columns\n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in zero_variance_cols]]\n print('{} zero variance columns.'.format(n_zero_variance_cols))\n \n # Correlations\n corr_matrix = feature_matrix.corr()\n\n # Extract the upper triangle of the correlation matrix\n upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k = 1).astype(np.bool))\n\n # Select the features with correlations above the threshold\n # Need to use the absolute value\n to_drop = [column for column in upper.columns if any(upper[column].abs() > correlation_threshold)]\n\n n_collinear = len(to_drop)\n \n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in to_drop]]\n print('{} collinear columns removed with threshold: {}.'.format(n_collinear,\n correlation_threshold))\n \n total_removed = n_missing_cols + n_zero_variance_cols + n_collinear\n \n print('Total columns removed: ', total_removed)\n print('Shape after feature selection: {}.'.format(feature_matrix.shape))\n return feature_matrix",
"def delete_small_clusters(new_centroids, centroid_counter, threshold):\n\n out_centroids = []\n for n in range(len(new_centroids)):\n if centroid_counter[n] > threshold:\n out_centroids.append(new_centroids[n])\n out_centroids = np.array(out_centroids)\n return out_centroids",
"def _importance_based_graph_cut(self, graph, threshold):\n for node, data in graph.nodes_iter(data=True):\n if float(data['importance']) < threshold:\n graph.remove_node(node)\n return",
"def _remove_non_informative_rows(self, df, threshold):\n df_tmp = pd.DataFrame()\n n_features = len(df.columns)\n # calculating ratio of rows that have more than \"ratio\" missing values\n df_tmp['ratio'] = df.apply(lambda row: row.isnull().sum()/n_features, axis='columns')\n\n # kick too noisy rows\n return df[df_tmp['ratio'] <= threshold]",
"def prune_values(self, threshold):\n changed = False\n new_table = dict()\n for assignment in self._table.keys():\n prob = self._table[assignment]\n if prob >= threshold:\n new_table[assignment] = prob\n else:\n changed = True\n\n self._table = new_table\n return changed",
"def filterMissings(self, threshold, data):\n\n #replace NAs by 0 for counting\n data.fillna(0).astype(bool).sum(axis=1)\n\n filtered_columns = data.columns\n\n\n #find out threshold, i.e. minimum number of non-zero in real numbers\n rowNumber = data.shape[0]\n min_nonZeros = int(rowNumber - ((rowNumber * int(threshold))/100))\n\n zero_counts = data.astype(bool).sum(axis=0)\n\n for columnID, nonZeros in zero_counts.items():\n if nonZeros <= min_nonZeros:\n filtered_columns = filtered_columns.drop(columnID)\n\n\n return data[filtered_columns]",
"def prune_trie(trie, threshold):\n\tnode = trie.root\n\tpq = []\n\tfor i in node.children.keys():\n\t\tpq.append((node.children[i],node.children[i].char))\n\twhile len(pq) > 0:\n\t\tcur_node, char = pq.pop()\n\t\tif cur_node.isEnd == False:\n\t\t\tfor i in cur_node.children.keys():\n\t\t\t\tpq.append((cur_node.children[i],char + cur_node.children[i].char))\n\t\telse:\n\t\t\tif cur_node.weight < threshold:\n\t\t\t\tdelete(trie, char)\n\t\t\telse:\n\t\t\t\tcontinue\n\treturn trie",
"def truncate(coeffs, threshold=99):\n sortedindex = np.argsort(np.abs(coeffs))[::-1]\n Ncoeff = coeffs.shape[-1]\n cutoff = np.int(np.round(Ncoeff*threshold/100.))\n \n# print \"Keeping %2.0f %% (N=%s) of the biggest coefficients\"%(threshold,cutoff)\n\n coeffs_trunc = coeffs.copy() \t\t\t# copy of all coeff\n coeffs_trunc[sortedindex[cutoff:]] = 0 \t\t# put coeff\n \n return coeffs_trunc",
"def purgeHighSparsedFeatures(df,threshold,barplot=False,title=''):\n \n thr = math.floor(df.shape[1] * threshold)\n rowsToDrop = np.array([])\n logger.debug(Sc+'Patient Threshold is %d' % thr) \n logger.debug(Sc+'Matrix dimensions : Rows %d , Columns %d'% (df.shape[0],df.shape[1]))\n #axis_x = np.arange(0,df.shape[0]) \n axis_y = np.array([]) \n numRows = df.shape[0] \n for i in range(1,numRows):\n arr = pd.isnull(df.iloc[i])\n nnan = np.sum(arr) \n axis_y = np.append(axis_y,nnan)\n if (nnan > thr):\n rowsToDrop = np.append(rowsToDrop,i)\n logger.debug ('%d features to drop ' % len(rowsToDrop))\n np.savetxt('debug/sparseFeaturesaxis_y.txt',axis_y)\n #if(barplot):\n # ax.title.set_text(title)\n # ax.bar(axis_x,axis_y) \n #logger.debug('After purge there are %d columns '% df.shape[1])\n return rowsToDrop",
"def prune_tree(tree, cutoff, posteriors):\n new_tree = []\n for e in tree:\n try:\n if posteriors[e] > cutoff:\n new_tree.append(e)\n except KeyError:\n if posteriors[e[::-1]] > cutoff:\n new_tree.append(e)\n return new_tree",
"def train_threshold(ref_centroids, feature_mat, k):\n all_dist = list()\n overall_max_dist = 0\n for i in range(len(feature_mat)):\n feature_vec = feature_mat[i]\n centroids_, distortion_ = cluster_feature(feature_vec, k)\n centroids_ = centroids_.reshape(len(centroids_), -1)\n\n # Compute distortion to decide the threshold\n # scipy.cdist: Computes distance between each pair of the two collections of inputs\n # Get the average minimum distance of each i to j pair where i is not equal to j\n dist = cdist(ref_centroids, centroids_, 'euclidean')\n code = dist.argmin(axis=1)\n min_dist_list = dist[np.arange(len(code)), code]\n avg_min_dist = min_dist_list.mean(axis=-1)\n all_dist.append(avg_min_dist)\n overall_max_dist = max(overall_max_dist, avg_min_dist)\n np_all_dist = np.asarray(all_dist)\n mean = np_all_dist.mean()\n std = np_all_dist.std()\n threshold = mean + 2 * std\n return threshold",
"def filter_detections(detections, arg_to_class, conf_thresh=0.5):\n num_classes = detections.shape[0]\n filtered_detections = []\n for class_arg in range(1, num_classes):\n class_detections = detections[class_arg, :]\n confidence_mask = np.squeeze(class_detections[:, -1] >= conf_thresh)\n confident_class_detections = class_detections[confidence_mask]\n if len(confident_class_detections) == 0:\n continue\n class_name = arg_to_class[class_arg]\n for confident_class_detection in confident_class_detections:\n coordinates = confident_class_detection[:4]\n score = confident_class_detection[4]\n detection = Box2D(coordinates, score, class_name)\n filtered_detections.append(detection)\n return filtered_detections",
"def remove_null_cols(df, thresh=0.08):\n \n # look at this\n # df.dropna(thresh=int(df.shape[0] * .9), axis=1)\n pct_null = df.isnull().sum() / len(df)\n missing_features = pct_null[pct_null > thresh].index\n return df.drop(missing_features, axis=1)"
]
| [
"0.71646625",
"0.69492567",
"0.66253006",
"0.6593147",
"0.6495919",
"0.6414996",
"0.632642",
"0.62061864",
"0.61500806",
"0.61159605",
"0.6099869",
"0.60162574",
"0.5921103",
"0.5836404",
"0.5829031",
"0.5768273",
"0.5731205",
"0.5721981",
"0.5694327",
"0.5686231",
"0.56675047",
"0.5640187",
"0.5628943",
"0.5615181",
"0.5581606",
"0.5561311",
"0.5555995",
"0.5539209",
"0.55066234",
"0.5506161"
]
| 0.76357895 | 0 |
Return a list of indices that will be affected when painting at the given position. If ``position`` is not specified, it is assumed to be the origin. | def indices(self, position=None):
    raise NotImplementedError() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def position(self):\n return self._pos.to_list()",
"def position_to_index(self, position, grid_size):\n x, y = position\n return x * grid_size + y",
"def calcSubIndices(self, position):\n ratios = (position - self._minPosition)\n ratios /= self._halfDims\n subIndices = ratios.astype(np.int32)\n return subIndices",
"def get_nodes_from_position(self, position=None):\n return [nodes for nodes, positions in self.tree.nodes(data=True) if positions[\"position\"] == position]",
"def canvas_position(\n self,\n position: jnp.ndarray,\n params: utils.Params\n ) -> jnp.ndarray:",
"def get_position(self):\n return list(self.position)",
"def get_position(self, position):",
"def get_index_3d_from_pos(self, pos):\n pos_wrapped = [wrap(x, L) for x, L in zip(pos, self.L)]\n index = [np.digitize(x, b) for x, b in zip(pos_wrapped, self.bins)]\n # subtract 1 from each index because np starts counting from 1\n index = [n-1 for n in index]\n return index",
"def cartesian(position):\n return [position[0] * cos(position[1]), position[0] * sin(position[1])]",
"def index_from_position_tuple(self, position):\n x = self.base_values.index(position[0])\n y = self.base_values.index(position[1])\n return y * self.size + x",
"def neighbor(self, position):\n if self.options['PeriOpt']:\n if position[0] == 0:\n yield (self.size[0] - 1, position[1]), 0\n elif position[0] == self.size[0] - 1:\n yield (0, position[1]), 1\n if position[1] == 0:\n yield (position[0], self.size[1] - 1), 2\n elif position[1] == self.size[1] - 1:\n yield (position[0], 0), 3\n\n if position[0] > 0:\n yield (position[0] - 1, position[1]), 0\n if position[0] < self.size[0] - 1:\n yield (position[0] + 1, position[1]), 1\n if position[1] > 0:\n yield (position[0], position[1] - 1), 2\n if position[1] < self.size[1] - 1:\n yield (position[0], position[1] + 1), 3",
"def get_color_index(position):\n\n\t\tcolors = {\"blue\": 6,\n\t\t\t\t\t\t\t\"red\": 13,\n\t\t\t\t\t\t\t\"yellow\": 17,\n\t\t\t\t\t\t\t\"light_blue\": 18,\n\t\t\t\t\t\t\t\"pink\": 20,\n\t\t\t\t\t\t\t\"purple\": 30}\n\n\t\tpositions = {\"C\": \"yellow\",\n\t\t\t\t\t\t\t\t \"R\": \"red\",\n\t\t\t\t\t\t\t\t \"L\": \"blue\"}\n\n\t\tindex = colors.get(positions.get(position[0], None), None)\n\t\tif not index:\n\t\t\t\traise KeyError(\"Position color index '%s' not recognised.\" % position)\n\n\t\treturn index",
"def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours",
"def get_near_positions(self, position: tuple):\n\n return ((x, y) for x, y in (\n (position[0], position[1] + 1),\n (position[0], position[1] - 1),\n (position[0] + 1, position[1]),\n (position[0] - 1, position[1])\n ) if 0 <= x < self._map_height and 0 <= y < self._map_width)",
"def getPosition(self):\n\t\txxx1 = self.stokes()\n\t\txxx2 = self.thp()\n\t\txxx3 = self.tthp()\n\t\treturn [xxx1, xxx2, xxx3]",
"def update_position(position, direction):\n\n\tif direction == 'left':\n\t\treturn [position[0], position[1] - 1]\n\tif direction == 'right':\n\t\treturn [position[0], position[1] + 1]\n\tif direction == 'down':\n\t\treturn [position[0] + 1, position[1]]\n\tif direction == 'up':\n\t\treturn [position[0] - 1, position[1]]\n\treturn [-1, -1]",
"def get_position(self):\n return [self._row, self._column]",
"def id_from_position(self, position: PositionT) -> Union[int, Array]:\n int_pos = self._to_integer_position(position)\n ids = self._get_id_from_dict(self._int_position_to_site, int_pos)\n return ids",
"def list_posns(lot, x, y):\n return [position(t, x, y) for t in lot]",
"def positions_to_coords(self, positions):\n return [self.to_coords(px, py) for (px, py) in positions]",
"def get_positions(self):\n return self.positions",
"def position_to_index(position, grid_size):\n return position[0]*grid_size+position[1]",
"def screen_coordinates(pos):\n\n return [int((pos[0] % screen_width) / px), screen_height - int((pos[1] % screen_height) / px)]",
"def get_pos(self):\n return [self.row, self.col]",
"def inside(self, position):\n\n\t\tdelta = position - self.base\n\t\t\n\t\tcoordinates = numpy.dot(self.coordinate_matrix, delta)\n\n\t\tif (coordinates >= 0).all() and (coordinates.sum() <= 1):\n\t\t\treturn coordinates",
"def position(self):\n index = self._ordered_input_names.index('position')\n return self._inputs[index]",
"def positions(self):\n return self.inorder() # make inorder the default",
"def get_pos_index(self):\n return [self.row-1, self.col-1]",
"def gather_indexes(sequence_tensor, positions):\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return output_tensor",
"def gather_indexes(sequence_tensor, positions):\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return output_tensor"
]
| [
"0.6211723",
"0.62090737",
"0.6200937",
"0.619704",
"0.6058277",
"0.6018042",
"0.5992041",
"0.5921562",
"0.5854983",
"0.5845575",
"0.5837964",
"0.58091956",
"0.5769953",
"0.5736498",
"0.5736476",
"0.57310563",
"0.56805414",
"0.566974",
"0.5648781",
"0.5648487",
"0.5643726",
"0.56280446",
"0.55934966",
"0.55863565",
"0.5582273",
"0.5558353",
"0.55463415",
"0.55375844",
"0.5536386",
"0.5536386"
]
| 0.7316036 | 0 |
Verify the keystore file and directory | def _verify_keystore(self):
    keystore_uid = FileUtil(self.keystore_file).uid()
    if keystore_uid not in (-1, HostInfo.uid):
        raise IOError("not owner of keystore: %s" % self.keystore_file)
    keystore_dir = os.path.dirname(self.keystore_file)
    if FileUtil(keystore_dir).uid() != HostInfo.uid:
        raise IOError("keystore dir not found or not owner: %s" % keystore_dir)
    if (keystore_uid != -1 and (os.stat(self.keystore_file).st_mode & 0o077)):
        raise IOError("keystore is accessible to group or others: %s" % self.keystore_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __check_opts(self):\n self.ca_cert_file = os.environ['HOME'] + '/.cat_installer/ca.pem'\n self.pfx_file = os.environ['HOME'] + '/.cat_installer/user.p12'\n if not os.path.isfile(self.ca_cert_file):\n print(Messages.cert_error)\n sys.exit(2)",
"def verify (self, path):\n pass",
"def validate_keystore_key(args):\n expected_name = 'keyRings/%s/cryptoKeys/%s'\n expected_name %= (args.kms_keyring, args.kms_key)\n describe_output = ''\n try:\n describe_output = subprocess.check_output(\n ['gcloud', 'kms', 'keys', 'describe', args.kms_key,\n '--project', args.kms_project,\n '--location', args.kms_location,\n '--keyring', args.kms_keyring,\n '--format', 'value(name)'])\n except subprocess.CalledProcessError:\n pass\n if expected_name in describe_output:\n return\n # Print warning and exit if output did not include the key.\n warning = 'KMS key \"%s\" not found in keyring=%s project=%s location=%s'\n warning %= (args.kms_key,\n args.kms_keyring,\n args.kms_project,\n args.kms_location)\n Print.YL(warning)\n sys.exit(1)",
"def verify(key, file, sign):\n\n try:\n key = TomlKeyFormatter().from_string(key.read())\n signature = TomlSignatureFormatter().from_string(sign.read())\n\n if signature.verify(SignableBinaryIO(file), key):\n click.echo(\"---verified---\")\n exit(0)\n else:\n click.echo(\"---denied---\")\n exit(1)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except SignatureFormatError:\n click.echo(\"ERROR: Signature is in bad format\")",
"def verify(self):\n\t\t\n\t\tif not os.path.exists(self.objects_root):\n\t\t\tself.error = \"no such directory: %s\" % \\\n\t\t\t\tself.objects_root\n\t\t\treturn False\n\t\t\n\t\tif not os.path.isdir(self.objects_root):\n\t\t\tself.error = \"not a directory: %s\" % \\\n\t\t\t\tself.objects_root\n\t\t\treturn False\n\t\t\n\t\treturn True",
"def verify_path(self) -> Result(bool, Exception):\n\t\ttry:\n\t\t\tif self.type_encryption == enums.Encryption.RSA:\n\n\t\t\t\t# use the directory of the public key to encrypt a test message\n\t\t\t\th = rsa.Handler(self.directory_key_private, self.directory_key_public)\n\t\t\t\tmessage = 'test keys'\n\t\t\t\tencrypted = h.encrypt(message, h.get_public_key())\n\n\t\t\t\t# check if the original message and the decryption match\n\t\t\t\tif message == h.decrypt(encrypted):\n\t\t\t\t\t# if so, we can push them to the class variables and stop the function\n\t\t\t\t\tself._publicKey = h.get_public_key()\n\t\t\t\t\tself._privateKey = h.get_private_key()\n\n\t\t\t\treturn Result(True, None)\n\t\texcept Exception as e:\n\t\t\treturn Result(None, e)",
"def verify_file_path(self) -> None:\n path = \"/data\"\n verify_file_path(path)",
"def _set_keystore_path(self) -> None:\n response = self.single_call(\"hmy keys location\").strip()\n if not os.path.exists(response):\n os.mkdir(response)\n self.keystore_path = response",
"def check_load(log, signatures):\n\n if token_dir is None:\n return True\n\n if not signing_keys:\n return True\n\n # The web sandbox should be enough.\n if renpy.emscripten:\n return True\n\n if verify_data(log, signatures):\n return True\n\n def ask(prompt):\n \"\"\"\n Asks the user a yes/no question. Returns True if the user says yes,\n and false otherwise.\n \"\"\"\n\n return renpy.exports.invoke_in_new_context(renpy.store.layout.yesno_prompt, None, prompt)\n\n if not ask(renpy.store.gui.UNKNOWN_TOKEN):\n return False\n\n new_keys = [ i for i in get_keys_from_signatures(signatures) if i not in verifying_keys ]\n\n if new_keys and ask(renpy.store.gui.TRUST_TOKEN):\n\n keys_text = os.path.join(token_dir, \"security_keys.txt\")\n\n with open(keys_text, \"a\") as f:\n for k in new_keys:\n f.write(encode_line(\"verifying-key\", k))\n verifying_keys.append(k)\n\n if not signatures:\n return True\n\n # This check catches the case where the signature is not correct.\n return verify_data(log, signatures, False)",
"def __test_cert_file__(parser, certfile):\n if not os.path.exists(certfile):\n parser.error(\"invalid certificate file {} (it not exists)\".format(certfile))\n return True",
"def _check_ca_certificate(self):\n if not os.path.exists(self._ca_certificate_path):\n with open(self._ca_certificate_path, \"w\") as f:\n f.write(ssl.get_server_certificate((\"127.0.0.1\", self._app_port), ssl_version=ssl.PROTOCOL_TLSv1_2))",
"def check_trust(cert_file, trust_dir):\n\n cert_hash = get_hash(cert_file)\n\n for filename in os.listdir(trust_dir):\n cert_path = os.path.join(trust_dir, filename)\n if os.path.exists(cert_path):\n if cert_hash == get_hash(cert_path):\n return CERT_TRUSTED\n return CERT_UNTRUSTED\n\n \"\"\"\n # Code to be used in production:\n cert_hash = get_hash(cert_file)\n cert_path = os.path.join(trust_dir, cert_hash)\n if os.path.exists(cert_path):\n if cert_hash == get_hash(cert_path):\n return CERT_TRUSTED\n return CERT_UNTRUSTED\n \"\"\"",
"def verify(self):\n self.verify_checksums()\n self.verify_apk_signature()\n self.verify_apk_signature_fprs()",
"def test_get_cert_store(self):\n context = Context(SSLv23_METHOD)\n store = context.get_cert_store()\n assert isinstance(store, X509Store)",
"def _verify_signtool(cls, sdk: WindowsSDK) -> bool:\n if not sdk.signtool_exe.is_file():\n return False\n\n try:\n sdk.tools.subprocess.check_output([sdk.signtool_exe, \"-?\"])\n except (OSError, subprocess.CalledProcessError):\n # Windows can raise OSError when it cannot run signtool. This can happen\n # when an old version of the SDK is installed and only signtool is installed.\n return False\n\n return True",
"def test_verify_path_1(self):\n result = basic.verify_path(self.test_filepath1, \"file\")\n self.assertTrue(result)",
"def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'processed/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'processed/test.pkl')))",
"def validateKrusty (self):\n self.mountMainPartition()\n installDictionary = self._createKrustyInstallationDictionary()\n self._writeDictionaryAsJson(installDictionary, self._getKrustyInstallationFilePath())\n self._log(\"validate-krusty\").notice(\"secure digital software (krusty) is validated\")",
"def test_verify_path_3(self):\n result = basic.verify_path(str(self.test_directory1), \"dir\")\n self.assertTrue(result)",
"def test_check_privatekey_valid(self):\n key = load_privatekey(FILETYPE_PEM, client_key_pem)\n cert = load_certificate(FILETYPE_PEM, client_cert_pem)\n context = Context(SSLv23_METHOD)\n context.use_privatekey(key)\n context.use_certificate(cert)\n assert None is context.check_privatekey()",
"def _check(self):\n err_msg = \"\"\n plaintext_exists = os.path.exists(self.plain_file)\n encrypt_exists = os.path.exists(self.encrypted_file)\n\n if self.encrypt:\n if not plaintext_exists:\n err_msg = \"Could not find the file to encrypt '{}'. \".format(self.plain_file)\n else:\n if not encrypt_exists:\n err_msg = \"Could not find the encrypted file '{}'. \".format(self.encrypted_file)\n if plaintext_exists and not self.overwrite:\n err_msg += (\n \"The plaintext file '{}' already exists and --force was not \"\n \"specified. \".format(self.plain_file)\n )\n\n if not os.path.isdir(self.dest):\n err_msg += \"Destination directory '{}' does not exist. \".format(self.dest)\n if not os.path.exists(self.secret.keyfile):\n err_msg += \"Encryption key/passphrase file '{}' does not exist\".format(\n self.secret.keyfile\n )\n\n if err_msg:\n raise RuntimeError(\"Cannot process {}: {}\".format(self.plain_file, err_msg))",
"def test_load_verify_invalid_file(self, tmpfile):\n clientContext = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n clientContext.load_verify_locations(tmpfile)",
"def validate(self):\n if not self.key or not self.certificates:\n raise ValueError(\"Key or certificate missing in Keypair\")",
"def verify():\n verbose = True\n log(\n \"Verifying current directory as a Dallinger experiment...\",\n verbose=verbose,\n )\n ok = verify_package(verbose=verbose)\n if ok:\n log(\"✓ Everything looks good!\", verbose=verbose)\n else:\n log(\"☹ Some problems were found.\", verbose=verbose)",
"def exists(self):\n return os.path.exists(self.key_file)",
"def verify(args):\n\n if args.suppress_verify_output:\n sys.stdout = open(os.devnull, \"w\")\n\n try:\n # Check file exists\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n except FileNotFoundError:\n get_oidc_auth()\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n \n try:\n # Check file has a prefix (corresponds to federation in /etc/httpd/conf.d/zlcgdm-ugr-dav.conf)\n if \"prefix\" not in config_json:\n print(\"Federation prefix not specified\")\n return 1\n\n # Check file has a list of groups\n if \"groups\" not in config_json:\n print(\"No groups are specified\")\n return 1\n\n # Check groups is actually a list\n if not isinstance(config_json[\"groups\"], list):\n print(\"Groups should be a list\")\n return 1\n\n # Check validity of group format\n for index, group in enumerate(config_json[\"groups\"]):\n\n # Check group is a dict of items\n if not isinstance(group, dict):\n print(\"Groups should be a list of objects, group list index \" +\n str(index) + \" is not an object\")\n return 1\n\n # Check group has a name\n if \"name\" not in group:\n print(\"No name specified for group list index \" +\n str(index))\n return 1\n\n # Check validity of buckets assigned to groups\n for index2, bucket in enumerate(group[\"buckets\"]):\n\n # Check bucket has a name\n if \"name\" not in bucket:\n print(\"No name specified for bucket list index \" +\n str(index2))\n return 1\n\n # Check bucket name is a valid string\n if not isinstance(bucket[\"name\"], basestring):\n print(str(bucket[\"name\"]) + \" is not a string, \" +\n \"name should be a string for bucket list index \" +\n str(index2))\n return 1\n\n # Check if we have a valid value for propogate_permissions\n # propogate_permissions is set to true if we want to grant the given permissions for a path to all its child paths\n if \"propogate_permissions\" in bucket and not isinstance(bucket[\"propogate_permissions\"], bool):\n print(str(bucket[\"propogate_permissions\"]) + \" is not a bool, \" +\n \"propogate_permissions should be a bool for bucket list index \" + str(index2))\n return 1\n\n # Check bucket has a list of attributes required of the user for them to be authorised access\n if \"allowed_attributes\" not in bucket:\n print(\"No allowed attributes specified for bucket list index \" + str(index2))\n return 1\n\n # Check the above is in list format\n if not isinstance(bucket[\"allowed_attributes\"], list):\n print(str(bucket[\"allowed_attributes\"]) + \" is not a list, \" +\n \"allowed_attributes should be a list for bucket list index \" + str(index2))\n return 1\n\n # Checking each allowed attribute set in a bucket\n for attr_index, allowed_attributes in enumerate(bucket[\"allowed_attributes\"]):\n\n # Check allowed attribute is a dict\n if not isinstance(allowed_attributes, dict):\n print(\"allowed_attributes should be a list of objects, \" +\n \"attribute_requirements list index \" + str(attr_index) +\n \" endpoint list index \" + str(index2) +\n \" has an allowed_attributes list item that is not an object\")\n return 1\n\n # Check we have at least one key-value pair for specifying what the attribute needs to be, e.g. 
attribute: group, value: my-group\n if \"attribute_requirements\" not in allowed_attributes:\n print(\"No attribute_requirements specified in attribute_requirements list index \" +\n str(attr_index) + \" endpoint list index \" + str(index2))\n return 1\n\n # Check we have a string of allowed permissions for what the user with the given attributes can do\n # Currently, only r and l (read and list) are supported as IRIS DynaFed is read-only\n if \"permissions\" not in allowed_attributes:\n print(\"No permissions specified in attribute_requirements list index \" +\n str(attr_index) + \" endpoint list index \" + str(index2))\n return 1\n\n # Check each attribute is a dict containing the above\n if not isinstance(allowed_attributes[\"attribute_requirements\"], dict):\n print(\"attribute_requirements should be a dict, in attribute_requirements list index \" +\n str(attr_index) + \" endpoint list index \" + str(index2))\n return 1\n\n # Validate the format of each attribute\n if check_valid_attribute_condition(allowed_attributes[\"attribute_requirements\"], attr_index, index) == 1:\n return 1\n\n # use sets to check that only r, l, w and d values are allowed, it does allow for empty permissions\n if not set(allowed_attributes[\"permissions\"]) <= set([u\"r\", u\"w\", u\"l\", u\"d\", u\"c\"]):\n print(\"attribute_requirements permissions should be a string \" +\n \"containing any of the modes r (read) l (list) w (write) \" +\n \"d (delete) c (create), in attribute_requirements list index \" +\n str(attr_index) + \" bucket list index \" + str(index2))\n return 1\n\n print(\"Config file is valid\")\n # restore stdout\n sys.stdout = sys.__stdout__\n return 0\n \n except ValueError as e:\n print(\"Invalid JSON: {}\".format(e)) \n return 1",
"def req_CHECKPRESENT(self, key):\n # TODO: so we need to maintain mapping from urls to keys. Then\n # we could even store the filename within archive\n # Otherwise it is unrealistic to even require to recompute key if we\n # knew the backend etc\n lgr.debug(\"VERIFYING key %s\" % key)\n akey, afile = self._get_akey_afile(key)\n if self.get_contentlocation(akey):\n self.send(\"CHECKPRESENT-SUCCESS\", key)\n else:\n # TODO: proxy the same to annex itself to verify check for archive.\n # If archive is no longer available -- then CHECKPRESENT-FAILURE\n self.send(\"CHECKPRESENT-UNKNOWN\", key)",
"def test_05_get(self, mock_readall, mock_config, mock_verks):\n self._init()\n udocker.Config = mock_config\n udocker.Config.tmpdir = \"/tmp\"\n mock_readall.return_value = self.credentials\n kstore = udocker.KeyStore(\"filename\")\n self.assertTrue(kstore.get(self.url))\n self.assertFalse(kstore.get(\"NOT EXISTING ENTRY\"))",
"def check(self):\n if self.is_signed():\n data = self._document.read()\n hash_value = data[-self._append_size+1:-1]\n data = data[:-self._append_size]\n\n encrypted = self._encryptor.encrypt_cbc(data, self._init_vector)\n current_hash_value = encrypted[-16:]\n\n if current_hash_value != hash_value:\n print(\"Hash values did not matched!\")\n else:\n print(\"Hash values matched!\")\n else:\n print(\"The document is not signed!\")",
"def verify_key_data_exists(key, file_name):\n try:\n with open(file_name, 'r') as file:\n lines = file.readlines()\n for line in lines:\n row = [r.strip() for r in line.split(',')]\n if row[0] == key:\n # row[3] has file name\n with open(row[3], 'r') as rfile:\n if rfile.read():\n return True\n return False\n except Exception as file_error:\n raise file_error"
]
| [
"0.6201481",
"0.5993226",
"0.59332156",
"0.58958596",
"0.5819025",
"0.57155925",
"0.566656",
"0.562736",
"0.5623745",
"0.5585482",
"0.55580485",
"0.5514019",
"0.549556",
"0.54786116",
"0.54786116",
"0.547791",
"0.54653835",
"0.5459613",
"0.5443938",
"0.54259336",
"0.54120564",
"0.53953606",
"0.53866285",
"0.5378986",
"0.5378075",
"0.5374295",
"0.53718036",
"0.53643215",
"0.53389484",
"0.5338624"
]
| 0.75544286 | 0 |
Get credential from keystore for given url | def get(self, url):
    auths = self._read_all()
    try:
        self.credential = auths[url]
        return self.credential["auth"]
    except KeyError:
        pass
    return "" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def credential_get(uniqueID: str):\n\n cert = safeisland.certificate(uniqueID)\n return {\"payload\": cert}",
"def get_ssl_certificate():",
"def get_ssl_certificate() :",
"def get_credential(self, key):\n return self.creds.get(key, '')",
"def get_credentials(key):\n with open(\"credentials.json\", \"r\") as credentials_file:\n credentials_data = json.load(credentials_file)\n\n try:\n return credentials_data[key]\n except KeyError:\n raise KeyError(f\"Credential {key} was not found in file.\")",
"def get_credentials(self, oid=None):\n path = '/credentials'\n key = 'credentials'\n if oid is not None:\n path = '%s/%s' % (path, oid)\n key = 'credential'\n res = self.client.call(path, 'GET', data='', token=self.token)\n self.logger.debug('Get openstack credentials: %s' % truncate(res))\n try:\n return res[0][key]\n except:\n raise OpenstackError('No credentials found')",
"def _get_credential(self, key):\n return self._data.get(key, None)",
"def _config_credentials_get():\n user = input(\"username:\")\n password = getpass.getpass()\n url = input(\"url:\")\n return user, password, url",
"def extract_credentials(url):\n parts = urlsplit(url)\n netloc = parts[1]\n if '@' in netloc:\n creds, netloc = netloc.split('@')\n credentials = tuple(_unquote(i) for i in creds.split(':'))\n parts = list(parts)\n parts[1] = netloc\n else:\n credentials = None\n return urlunsplit(parts), credentials",
"def get_credentials(prefs_file):\n with open(prefs_file, \"rb\") as pl:\n if six.PY2:\n prefs = plistlib.readPlist(pl)\n else:\n prefs = plistlib.load(pl)\n\n try:\n jamf_url = prefs[\"JSS_URL\"]\n except KeyError:\n jamf_url = \"\"\n try:\n jamf_user = prefs[\"API_USERNAME\"]\n except KeyError:\n jamf_user = \"\"\n try:\n jamf_password = prefs[\"API_PASSWORD\"]\n except KeyError:\n jamf_password = \"\"\n return jamf_url, jamf_user, jamf_password",
"def get_webpage_with_auth(url, username, password, logger):\n try:\n response = requests.get(url, auth=HTTPBasicAuth(username, password))\n return response.content\n except requests.exceptions.SSLError as e:\n logger.error('SSL error occurred while trying to retrieve {}\\nGot error: {}'.format(url, e))\n except requests.exceptions.BaseHTTPError as e:\n logger.error('HTTP error occurred while trying to retrieve {}\\nGot error: {}'.format(url, e))\n except Exception as e:\n logger.error('Unknown error occurred while trying to retrieve {}\\nError msg: {}'.format(url, e))",
"def get_credentials():\n store = Storage(CREDENTIAL_PATH)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n return credentials",
"def certificate_auth():\r\n url = 'https://www.12306.cn'\r\n response = requests.get(url, verify=False)\r\n print(response.status_code)\r\n print(response.text)",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sally.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials_from_client(self, key, url, connection_id) -> Credentials:\n enc = _Encrypter(key)\n base64_private_key, nonce, verifier = enc.get_verifier()\n encrypted_url = enc.encrypt(url, base64.b64decode(nonce))\n encrypted_credentials, nonce = _HttpClient.get_logins(connection_id, nonce, verifier, encrypted_url)\n iv = base64.b64decode(nonce)\n\n return {\n credential['user']: credential\n for credential in [\n {\n 'user': enc.decrypt(encrypted_credential['Login'], iv),\n 'password': enc.decrypt(encrypted_credential['Password'], iv)\n }\n for encrypted_credential in encrypted_credentials\n ]\n }",
"def get_certificate(self, url):\n bearer = 'Authorization: Bearer '+str(self.exchanged_token).split('\\n', 1)[0]\n data = json.dumps({\"service_id\": \"x509\"})\n\n headers = StringIO()\n buffers = StringIO()\n\n c = pycurl.Curl()\n c.setopt(pycurl.URL, url)\n c.setopt(pycurl.HTTPHEADER, [bearer, 'Content-Type: application/json'])\n c.setopt(pycurl.POST, 1)\n c.setopt(pycurl.POSTFIELDS, data)\n c.setopt(c.WRITEFUNCTION, buffers.write)\n c.setopt(c.HEADERFUNCTION, headers.write)\n c.setopt(c.VERBOSE, True)\n\n try:\n c.perform()\n status = c.getinfo(c.RESPONSE_CODE)\n c.close()\n body = buffers.getvalue()\n\n if str(status) != \"303\" :\n self.log.error(\"On \\\"get redirect curl\\\": %s , http error: %s \" % (body, str(status)))\n return False \n except pycurl.error, error:\n errno, errstr = error\n self.log.info('An error occurred: %s' % errstr)\n return False\n \n redirect = self.tts\n for item in headers.getvalue().split(\"\\n\"):\n if \"location\" in item:\n redirect = redirect + item.strip().replace(\"location: \", \"\")\n\n headers = {'Authorization': 'Bearer ' + self.exchanged_token.strip()}\n response = requests.get(redirect, headers=headers)\n\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n # Whoops it wasn't a 200\n self.log.error(\"get_certificate() Error: %s \" %str(e))\n return False\n\n with open('/tmp/output.json', 'w') as outf:\n outf.write(response.content)\n else:\n self.log.error(\"No location in redirect response\")\n\n return True",
"def _v2_auth(self, url):\n return {\"auth\": {\n \"passwordCredentials\": {\"username\": self.user,\n \"password\": self.secret}}}",
"def getfilehttps(self, url):\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n response = urllib.request.urlopen(url, context=ctx)\n result = response.read()\n return result",
"def parse_url(url):\n results = NotifyBase.parse_url(url)\n if not results:\n # We're done early as we couldn't load the results\n return results\n\n try:\n # Retrieve our secret_key from the first entry in the url path\n results['secret_key'] = \\\n NotifyPushjet.split_path(results['fullpath'])[0]\n\n except IndexError:\n # no secret key specified\n results['secret_key'] = None\n\n # Allow over-riding the secret by specifying it as an argument\n # this allows people who have http-auth infront to login\n # through it in addition to supporting the secret key\n if 'secret' in results['qsd'] and len(results['qsd']['secret']):\n results['secret_key'] = \\\n NotifyPushjet.unquote(results['qsd']['secret'])\n\n return results",
"def get_appengine_credentials():\n return get_credentials()",
"def get_credentials(service_name=\"dataforSeo\", uname=\"[email protected]\"):\n pw = keyring.get_password(service_name, uname)\n return [uname, pw]",
"def read_config(self, config):\n parser = SafeConfigParser()\n parser.read(config)\n\n cert = parser.get('https', 'cert')\n key = parser.get('https', 'key')\n\n return cert, key",
"def _get_credentials(rse, endpoint):\n\n key = '%s_%s' % (rse, endpoint)\n result = REGION.get(key)\n if type(result) is NoValue:\n try:\n logging.debug(\"Loading account credentials\")\n result = config.get_rse_credentials(None)\n if result and rse in result:\n result = result[rse]\n result['is_secure'] = result['is_secure'][endpoint]\n REGION.set(key, result)\n else:\n raise Exception(\"Failed to load account credentials\")\n logging.debug(\"Loaded account credentials\")\n except KeyError as e:\n raise exception.CannotAuthenticate('RSE %s endpoint %s not in rse account cfg: %s' % (rse, endpoint, e))\n except:\n raise exception.RucioException(\"Failed to load credentials for RSE(%s) endpoint(%s), error: %s\" % (rse, endpoint, traceback.format_exc()))\n return result",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'google-photos-stats.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, flags)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_url_data(self, url):\n # print \"opening: \" + url\n request = urllib2.Request(url)\n base64string = '%s:%s' % (self.username, self.key)\n request.add_header(\"Authorization\", \"ApiKey %s\" % base64string)\n response = urllib2.urlopen(request)\n data = json.loads(response.read())\n return data",
"def ssl_get_cert_from_request(request):\r\n certkey = \"SSL_CLIENT_S_DN\" # specify the request.META field to use\r\n\r\n cert = request.META.get(certkey, '')\r\n if not cert:\r\n cert = request.META.get('HTTP_' + certkey, '')\r\n if not cert:\r\n try:\r\n # try the direct apache2 SSL key\r\n cert = request._req.subprocess_env.get(certkey, '')\r\n except Exception:\r\n return ''\r\n\r\n return cert",
"def get_credentials():\n store = Storage(CLIENT_CREDENTIALS_FILE)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + CLIENT_CREDENTIALS_FILE)\n return credentials",
"def get_access_key():\n return get_config_handler().get_access_key()",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'clockwise.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_json(client, url, credentials):\n return client.get(url, headers={'Authorization': 'Basic ' + credentials})"
]
| [
"0.6387216",
"0.6368557",
"0.6185664",
"0.58744323",
"0.5864043",
"0.58523196",
"0.5837761",
"0.5830516",
"0.5760828",
"0.5742771",
"0.5692614",
"0.56781346",
"0.5676719",
"0.56597006",
"0.5616557",
"0.5609421",
"0.56065726",
"0.56043714",
"0.56017315",
"0.5596782",
"0.55819565",
"0.55773675",
"0.5511692",
"0.549638",
"0.5484881",
"0.546907",
"0.5458848",
"0.5453206",
"0.5448565",
"0.544472"
]
| 0.6912512 | 0 |
Put credential in keystore for given url | def put(self, url, credential, email):
    if not credential:
        return 1
    auths = self._read_all()
    auths[url] = {"auth": credential, "email": email, }
    self._shred()
    return self._write_all(auths) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def put(self, credential):\n pass",
"def set_credentials():",
"def injectPassword(url, httpCredentials):\n url = urlparse(url)\n\n # Only do this for HTTP URLs for which we have credentials\n if (\n url.scheme.startswith(ProtocolType.HTTP.value)\n and url.hostname in httpCredentials\n ):\n credentials = httpCredentials[url.hostname]\n\n # If username is defined via URL, it should match the one from the credentials\n if url.username and url.username != credentials.username:\n raise RuntimeError(\n \"Username mismatch: %r != %r\" % (url.username, credentials.username)\n )\n\n encodedNetloc = url.netloc\n\n # Username prefix (e.g. user@)\n usernamePrefix = credentials.username + \"@\"\n\n if not encodedNetloc.startswith(usernamePrefix):\n # Prepend username\n encodedNetloc = usernamePrefix + encodedNetloc\n\n tokens = encodedNetloc.split(\"@\")\n\n # Append password (percent encoded)\n encodedNetloc = (\n tokens[0] + \":\" + quote(credentials.password, safe=\"\") + \"@\" + tokens[1]\n )\n\n # Replace the netloc with the one encoded with a password\n url = url._replace(netloc=encodedNetloc)\n\n return urlunparse(url)",
"def refresh_cache(url):\n openid_config = OpenIDConnectConfiguration.load_config(url)\n jwks_uri = openid_config['jwks_uri']\n keys = OpenIDConnectConfiguration.load_jwks(jwks_uri)\n \n OpenIDConnectConfiguration.signing_keys[url] = keys",
"def set_credentials(self, username, password, url):\n # remove trailing slash off URL\n url = url.rstrip('/')\n # save variables to object\n self.url = url\n self.username = username\n self.password = password\n self.xml_rpc = '%s/server/xml.server.php' % (self.url)",
"def _set_keystore_path(self) -> None:\n response = self.single_call(\"hmy keys location\").strip()\n if not os.path.exists(response):\n os.mkdir(response)\n self.keystore_path = response",
"def makeHttps(url, cert_file=None, key_file=None, ca_file=None, **options):\n \n scheme, netloc, path, parameters, query, fragment = urlparse(url)\n if options.has_key('cache'):\n cache = options['cache']\n else:\n cache = None\n https = httplib2.Http(cache=cache, timeout=1000)\n if scheme == \"https\" and cert_file and key_file:\n https.add_certificate(key_file, cert_file, netloc)\n if ca_file:\n https.set_ca_file(ca_file)\n return https",
"def put_call(url, params=None, headers=None):\n if params is None:\n params = {}\n\n response = requests.put(url, data=params, headers=headers)\n\n if response.status_code == 401:\n raise RedirectException(reverse('b2b_control:logout'))\n\n return response",
"def store_password_in_keyring(username, password):\n return keyring.set_password(KEYRING_SYSTEM, username, password,)",
"def get_secure_link(url, key, expire=60, t=None):\n if \"?\" in url:\n url += \"&\"\n else:\n url += \"?\"\n if t is None:\n t = int(time.time())\n\n expire += t\n url += \"e=\" + str(expire)\n s = hmac.new(key.encode(), url.encode(), hashlib.sha256).digest()\n return url + \"&s=\" + base64.b64encode(s, b\"-_\").decode().rstrip(\"=\")",
"def add_share_key_to_url(plot_url, attempt=0):\n urlsplit = urllib.parse.urlparse(plot_url)\n username = urlsplit.path.split(\"/\")[1].split(\"~\")[1]\n idlocal = urlsplit.path.split(\"/\")[2]\n fid = \"{}:{}\".format(username, idlocal)\n body = {\"share_key_enabled\": True, \"world_readable\": False}\n response = v2.files.update(fid, body)\n\n # Sometimes a share key is added, but access is still denied.\n # Check that share_key_enabled is set to true and\n # retry if this is not the case\n # https://github.com/plotly/streambed/issues/4089\n time.sleep(4)\n share_key_enabled = v2.files.retrieve(fid).json()[\"share_key_enabled\"]\n if not share_key_enabled:\n attempt += 1\n if attempt == 50:\n raise _plotly_utils.exceptions.PlotlyError(\n \"The sharekey could not be enabled at this time so the graph \"\n \"is saved as private. Try again to save as 'secret' later.\"\n )\n add_share_key_to_url(plot_url, attempt)\n\n url_share_key = plot_url + \"?share_key=\" + response.json()[\"share_key\"]\n return url_share_key",
"def configure(self, source_url, hostcert, hostkey, ca_path):\n try:\n self.__url, self.__alias = source_url.split(\"#\")\n except ValueError:\n return \"Failed to parse URL#alias notation\"\n self.__hostcert = hostcert\n self.__hostkey = hostkey\n self.__ca_path = ca_path",
"def save_credentials(credentials):\n credentials. save_details()",
"def add_site(site_name, public_key):\n click.echo('Adding site key for site {}'.format(site_name))\n key_store = KeyStore(get_config_file())\n key_store.add_site(site_name, public_key)",
"def store_credentials(token, url='https://quantumexperience.ng.bluemix.net/api',\n hub=None, group=None, project=None, proxies=None,\n verify=True, overwrite=False):\n url = _parse_ibmq_credentials(url, hub, group, project)\n credentials.store_credentials(\n provider_class=IBMQProvider, overwrite=overwrite,\n token=token, url=url, proxies=proxies, verify=verify)",
"def put(self, url):\n return self.session.put(url=self.base_url + url)",
"def set_hashes(self, url, hashes):",
"def credentials(self, url, username_from_url, allowed_types):\n raise Passthrough",
"def create_keystore(self, environment, keystore_name):\n if keystore_name in self.list_keystores(environment):\n return self.get_keystore(environment, keystore_name)\n r = requests.post(\n f\"https://api.enterprise.apigee.com/v1/organizations/{self.apigee_org}/environments/{environment}/keystores\",\n data={\"name\": keystore_name},\n headers=self._auth_headers,\n )\n r.raise_for_status()\n return r.json()",
"def _update_credential(self, key, cred):\n self._data[key] = cred\n self._write()",
"def put(self, credentials):\n self._lock.acquire()\n f = open(self._filename, 'w')\n f.write(pickle.dumps(credentials))\n f.close()\n self._lock.release()",
"def aput(url, **kwargs):\n return requests.put(url, **kwargs)",
"def setpass(self, type, key):\n self.data.passwords[type] = key\n self.save()",
"def save_credentials(credentials):\n Credentials.save_credentials(credentials)",
"def store_apikey_in_keyring(platform_id='public', # type: str\n base_url=None, # type: str\n keyring_entries_username=KR_DEFAULT_USERNAME, # type: str\n apikey=None, # type: str\n ):\n client = ODSClient(platform_id=platform_id, base_url=base_url, keyring_entries_username=keyring_entries_username)\n client.store_apikey_in_keyring(apikey=apikey)",
"async def store_credential(\n self, cred_ex_record: V20CredExRecord, cred_id: str = None\n ) -> None:",
"def _v2_auth(self, url):\n return {\"auth\": {\n \"passwordCredentials\": {\"username\": self.user,\n \"password\": self.secret}}}",
"def put(self, credentials):\n entity = self._model.get_or_insert(self._key_name)\n setattr(entity, self._property_name, credentials)\n entity.put()",
"def save_credentials(verify = True):\n config = {\n 'client_id': input('client id: '),\n 'client_secret': input('client secret: ')\n }\n\n if verify:\n sp = spotipy.Spotify(client_credentials_manager = SpotifyClientCredentials(**config))\n try:\n sp.search(\"The Beatles\")\n except SpotifyException as e:\n # TODO: informative message\n raise\n\n path = Path(CONFIG_PATH).expanduser()\n print(\"Writing credentials to %s\" % path.absolute())\n\n yaml.dump(config, path.open('w'), default_flow_style = False)",
"def connect(\r\n self,\r\n key: str,\r\n secret: str,\r\n ) -> None:\r\n self.key = key\r\n self.secret = secret\r\n self.host, _ = _split_url(self.REST_HOST)"
]
| [
"0.6042651",
"0.58238924",
"0.5521307",
"0.5515872",
"0.5485544",
"0.5354821",
"0.5317913",
"0.5234732",
"0.523063",
"0.5222448",
"0.5218145",
"0.51993984",
"0.5184724",
"0.51704127",
"0.51523966",
"0.5114277",
"0.5083231",
"0.50800633",
"0.5076598",
"0.5052661",
"0.50402695",
"0.5033196",
"0.5022824",
"0.50085235",
"0.49973807",
"0.497276",
"0.49403518",
"0.49248776",
"0.49241647",
"0.4911245"
]
| 0.6396992 | 0 |
Delete credential from keystore for given url | def delete(self, url):
self._verify_keystore()
auths = self._read_all()
try:
del auths[url]
except KeyError:
return 1
self._shred()
return self._write_all(auths) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_credential(credentials):\n credentials.delete_credentials()",
"def _delete_credential(self, key):\n try:\n del self._data[key]\n except KeyError:\n pass\n self._write()",
"async def delete_url(self, url: StrOrURL):\n await self.delete(self.create_key('GET', url))",
"def delete_credential(self, credential):\r\n return self.delete(self.credential_path % (credential))",
"def remove(url: str):\n authenticated = credentials.authenticate(url)\n REMOVER_REGISTRY.get_handler(authenticated.scheme).remove(authenticated)",
"def delete_password(self, filename, website):\n\n if os.path.isfile(filename):\n with open(filename, 'r') as jdata:\n jfile = json.load(jdata)\n \n try:\n jfile.pop(website)\n with open(\"db/passwords.json\", 'w') as jdata:\n json.dump(jfile, jdata, sort_keys=True, indent=4)\n except KeyError:\n raise PasswordNotFound\n else:\n raise PasswordFileDoesNotExist",
"def remove_client_credentials(self):\n if self._dry_run:\n return\n os.unlink(self._store_pathname)",
"def delete_call(url, headers=None):\n response = requests.delete(url, headers=headers)\n\n if response.status_code == 401:\n raise RedirectException(reverse('b2b_control:logout'))\n\n return response",
"def __deleteLock(self, url):\n response = self._adapter.deleteRequest(url, self._baseHeader)\n return response",
"def delete_password_in_keyring(username):\n return keyring.delete_password(KEYRING_SYSTEM, username,)",
"def adel(url, **kwargs):\n return requests.delete(url, **kwargs)",
"def delete_api_key(api_key):\n api.delete(api_key)",
"def delete_credential(self):\n Credentials.credentials_list.remove(self)",
"def delete_user_credentials(connection, api_url):\n\n body = {\n 'endpoint': api_url,\n 'user': '',\n 'password': '',\n 'token': '',\n 'type': 'none'\n }\n\n connection.post_obj_as_json('user/credentials', body)",
"def delete_credentials(self):\n Credentials.credentials_list.remove(self)",
"def delete_credentials(self):\n Credentials.credentials_list.remove(self)",
"def delete_credentials(self):\n Credentials.credentials_list.remove(self)",
"def delete_credentials(self):\n Credentials.credential_list.remove(self)",
"def delete_user_key(self, key):\n return AlgoliaUtils_request(self.headers, self.write_hosts, \"DELETE\", \"/1/keys/%s\" % key, self.timeout)",
"def _delete(\n session: \"Session\", url_tail: str, params: Optional[Dict[str, Any]] = None\n) -> None:\n url = session.get_base_url2() + url_tail\n response = _requests_session.delete(\n url,\n headers=_get_headers(session),\n params=params,\n verify=session.verify_ssl_certs,\n )\n _check_response_status(response)",
"def delete_credential(name: str):\n # first load any existing credentials\n try:\n creds = load_auth()\n except FileNotFoundError:\n # if no auth file exists we can just treat that as there being no credentials\n creds = []\n\n if '@' in name:\n username, hostname = name.split('@')\n else:\n username = name\n hostname = None\n\n # next, try to figure out which one we're supposed to remove\n matches = []\n match_indices = []\n\n for idx, cred in enumerate(creds):\n # the username must match\n if cred.username != username:\n continue\n # if specified, the hostname must match\n if hostname is not None and cred.hostname != hostname:\n continue\n\n matches.append(cred)\n match_indices.append(idx)\n\n if len(matches) == 0:\n err = f\"No matching credential found with username '{username}'\"\n if hostname is not None:\n err += f\" with hostname '{hostname}'\"\n raise RuntimeError(err)\n elif len(matches) > 1:\n raise RuntimeError(_construct_ambiguous_deletion_message(username, hostname, matches))\n\n # At this point we should have exactly one match, which we can delete\n del creds[match_indices[0]]\n write_auth_data(configure.get_config_path(\"auth\"), creds)\n prune_outdated_auth()",
"def delete(self, url, user):\n token = self.login(user)\n response = requests.delete(url_root + url, headers={\"access-token\": token})\n return response.json(), response.status_code",
"def remove_credentials(service: str) -> None:\n\n # SQL query to remove the user servise credentials from the database\n query = f\"DELETE FROM {service}_credentials WHERE user_id=?;\"\n\n # Execute the query\n with connect(DATABASE) as db:\n db.execute(query, (session[\"user_id\"],))\n db.commit()",
"def unset_credentials(ctx, user, store):\n try:\n logger.debug(\"store={store}, user={user}\".format(store=store, user=user))\n _pycred.unset_credentials(store, user)\n except Exception as e:\n logger.debug(e, exc_info=True)\n print('Error: {msg}'.format(msg=str(e)), file=sys.stderr)\n sys.exit(1)",
"def Delete(url):\n\n prefix = ''.join([url, config_encoder.NAMESPACE_SEPARATOR])\n\n # Remove Test Suites\n test_keys = _GetEntityKeysByPrefix(ndb_models.Test, prefix)\n ndb.delete_multi(test_keys)\n\n # Remove Device Actions\n device_action_keys = _GetEntityKeysByPrefix(ndb_models.DeviceAction, prefix)\n ndb.delete_multi(device_action_keys)\n\n # Remove Test Run Actions\n test_run_action_keys = _GetEntityKeysByPrefix(\n ndb_models.TestRunAction, prefix)\n ndb.delete_multi(test_run_action_keys)\n\n # Remove Config Set Info\n config_set_info_key = mtt_messages.ConvertToKey(ndb_models.ConfigSetInfo, url)\n config_set_info_key.delete()",
"def delete(self, url):\n return self.session.delete(url=self.base_url + url)",
"def delete_credential(self):\n Credential.credential_list.remove(self)",
"def test_aws_service_api_keypair_delete(self):\n pass",
"def delete_credential(self):\n\n Credential.credential_list.remove(self)",
"def erase(self):\n self._verify_keystore()\n try:\n self._shred()\n os.unlink(self.keystore_file)\n except (IOError, OSError):\n return 1\n return 0"
]
| [
"0.6958643",
"0.6620609",
"0.64928526",
"0.6304529",
"0.627589",
"0.6181416",
"0.6139326",
"0.6083812",
"0.59728557",
"0.5962097",
"0.5958842",
"0.5955593",
"0.59546417",
"0.59232026",
"0.58883035",
"0.58883035",
"0.58883035",
"0.5882123",
"0.5849173",
"0.5834461",
"0.5814376",
"0.57851624",
"0.5775828",
"0.57710737",
"0.5769562",
"0.5757515",
"0.5751363",
"0.57201403",
"0.56781197",
"0.5675114"
]
| 0.8057114 | 0 |
Checks if the given API response is an error, and then raises the appropriate exception. | def check_error(self, response):
if type(response) is dict and response.has_key('status_code'):
if response['status_code'] != 200:
raise rocket.RocketAPIException(response['status_code'],
response['status_text']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle_api_error(self, response):\n code = response.status_code\n self.__log(f'Handling API error with status code {code}.', 'error')\n if code == 401:\n self.__log(f'Invalid credentials. Please make sure your token is correct.', 'error')\n raise InvalidCredentialsError\n if code == 404:\n self.__log(f'File not found on query. Make sure query URL is correct and retry.', 'error')\n raise FileNotFoundError\n if code == 422:\n content = json.loads(response.content)\n for error in content['errors']:\n self.__log(f'API could not process the request. Message: {error[\"message\"]}.', 'error')\n raise UnprocessableRequestError(f'Issue with field {error[\"field\"]}: {error[\"message\"]}')\n if code == 429:\n self.__log(f'Monthly request limits exceeded. Upgrade billing or change token.', 'error')\n raise MonthlyRequestLimitExceededError\n self.__log(f'Response for code: \"{code}\" was unhandled by wrapper. Sorry to not be more helpful.', 'error')\n raise UnknownApiError(\"An unhandled API exception occurred\")",
"def handle_api_error(resp):\n content = yield resp.json()\n\n headers = HeaderWrapper(resp.headers)\n\n try:\n err = content['error']\n except (KeyError, TypeError):\n raise error.APIError(\n \"Invalid response object from API: %r (HTTP response code \"\n \"was %d)\" % (content, resp.code),\n resp, resp.code, content, headers)\n\n if resp.code in [400, 404]:\n raise error.InvalidRequestError(\n err.get('message'), err.get('param'),\n resp, resp.code, content, headers)\n elif resp.code == 401:\n raise error.AuthenticationError(\n err.get('message'),\n resp, resp.code, content, headers)\n elif resp.code == 402:\n raise error.CardError(\n err.get('message'), err.get('param'), err.get('code'),\n content, resp.code, resp, headers)\n else:\n raise error.APIError(\n err.get('message'), content, resp.code, resp, headers)",
"def _handle_api_error(self, error):\n status_code = error.response.status_code\n message = error.message\n\n if 403 == status_code:\n raise NewRelicInvalidApiKeyException(message)\n elif 404 == status_code:\n raise NewRelicUnknownApplicationException(message)\n elif 422 == status_code:\n raise NewRelicInvalidParameterException(message)\n else:\n raise NewRelicApiException(message)",
"def raise_for_status(response):\n if response.status_code != 200:\n res_data = response.json()\n if (response.status_code, res_data['error']) in error_map:\n raise error_map[(response.status_code, res_data['error'])](res_data['error_description'])\n raise ShoperApiError(res_data['error_description'])\n\n return response",
"def raise_best_exception(self, json_response):\n exceptions = {\n 206: CannotParseError,\n 400: BadRequestError,\n 401: NotAuthorizedError,\n 403: ForbiddenError,\n 404: NotFoundError,\n 500: ServerError,\n 503: UnavailableError,\n }\n try:\n err = json_response['response']['error']\n raise exceptions[err['code']](err['code'],err['message'])\n except IndexError:\n raise UnexpectedError('','Unexpected error.')",
"def validate_response(response):\n\n r = response\n try:\n r.raise_for_status()\n except HTTPError as e:\n message = dict(status_code=r.status_code, exception=e)\n\n try:\n response = r.json()\n message['response'] = response\n except JSONDecodeError as e:\n message['response'] = r.content\n\n raise HTTPError(message)",
"def _raise_if_error(response):\n if response.status_code != 200:\n raise SimpleHTTPException(response)",
"def raise_on_error(request: requests.Response) -> None:\n if request.status_code >= 400:\n json_res = request.json()\n raise requests.HTTPError(json_res)\n\n return None",
"def _check_status_error(self, res: requests.Response) -> None:\n try:\n if self._raise_exceptions:\n res.raise_for_status()\n if res.status_code > 600:\n raise requests.exceptions.HTTPError(\n u'%s Illegal return code: %s for url: %s' % (res.status_code, res.reason, res.url),\n response=res)\n\n except requests.exceptions.HTTPError as err:\n http_error_msg = str(err.args[0])\n\n if res.content:\n try:\n json_result: dict = res.json()\n message = json_result['error']['message']\n http_error_msg += \": \" + message\n except (json.JSONDecodeError, KeyError):\n if '_TOKEN' not in res.text:\n http_error_msg += \": \" + str(res.text)\n\n raise requests.exceptions.HTTPError(http_error_msg, response=err.response) from err",
"def validate_response(self, response: requests.Response) -> None:\n if 400 <= response.status_code < 500:\n msg = (\n f\"{response.status_code} Client Error: \"\n f\"{response.reason} for path: {self.path}. \"\n f\"Request payload: {response.request.body}\"\n )\n raise FatalAPIError(msg)\n\n elif 500 <= response.status_code < 600:\n msg = (\n f\"{response.status_code} Server Error: \"\n f\"{response.reason} for path: {self.path}\"\n )\n raise RetriableAPIError(msg)",
"def check_response(response):\n if response.status_code in [400, 401, 403, 404, 429, 500, 503, 504]:\n raise ServiceException(response.status_code)\n else:\n response.raise_for_status()",
"def handle_error_response(resp):\n error_message = ''\n error_message_with_reason = ''\n try:\n error_message = (\n resp.json()\n .get('fireeyeapis', {})\n .get('description', '')\n .strip()\n )\n error_message = error_message.replace('\\n', '')\n if error_message:\n error_message_with_reason = f'Reason: {error_message}'\n except ValueError: # ignoring json parsing errors\n pass\n if resp.headers.get('Content-Type', '') == CONTENT_TYPE_ZIP:\n error_message = error_message_with_reason = resp.text\n\n status_code_messages = {\n 400: f\"{MESSAGES['BAD_REQUEST_ERROR']} {error_message_with_reason}\",\n 401: MESSAGES['AUTHENTICATION_ERROR'],\n 403: error_message,\n 404: error_message,\n 406: error_message,\n 407: MESSAGES['PROXY_ERROR'],\n 500: MESSAGES['INTERNAL_SERVER_ERROR'],\n 503: MESSAGES['INTERNAL_SERVER_ERROR'],\n }\n\n if resp.status_code in status_code_messages:\n demisto.debug(\n f'Response Code: {resp.status_code}, Reason: {status_code_messages[resp.status_code]}'\n )\n raise DemistoException(status_code_messages[resp.status_code])\n else:\n raise DemistoException(resp.raise_for_status())",
"def _raise_dataroboterror_for_status(response):\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError:\n err_msg = '{code} Error: {msg}'.format(\n code=response.status_code, msg=response.text)\n raise DataRobotPredictionError(err_msg)",
"def _process_error(self, result):\n self.error = result\n if result['errorCode'] == 901:\n raise Exceptions.APIKeyInvalid\n elif result['errorCode'] == 902:\n raise Exceptions.APISecretInvalid\n elif result['errorCode'] == 903:\n raise Exceptions.InvalidRequestToken\n elif result['errorCode'] == 904:\n raise Exceptions.RequestTokenExpired\n elif result['errorCode'] == 905:\n raise Exceptions.InvalidAccessToken\n elif result['errorCode'] == 906:\n raise Exceptions.TokenExpired(self.access.expire)\n elif result['errorCode'] == 907:\n raise Exceptions.ParameterMissing\n elif result['errorCode'] == 908:\n raise Exceptions.ParameterNotFormatted\n elif result['errorCode'] == 909:\n raise Exceptions.FeatureNotSupported\n elif result['errorCode'] == 910:\n raise Exceptions.EndPointNotSupported\n else:\n raise Exceptions.UnknownJsonError(result)",
"def _check_response_status(response):\n # type: (Response) -> None\n try:\n response.raise_for_status()\n except HTTPError as e:\n raise HTTPError(f\"{e}. {response.text}\", response=response)",
"def raise_for_status(response: Response):\n\n if response.status_code != 200:\n error_body = response.json()['error']\n code = error_body['code']\n message = error_body['message']\n description = error_body['description']\n\n raise TradeException(status_code=response.status_code, code=code,\n message=message,\n description=description)\n\n return response",
"def _raise_error(self, status: int, result: dict):\n raise APIError(status, self._get_error_text(result))",
"def _handle_exception(self, exc):\n if isinstance(exc, APIError):\n self.return_api_error(exc.error)\n else:\n return False",
"def exception_handler(res):\n try:\n res_data = res.json()\n error_code = res_data['status']\n error_msg = build_error_msg(res_data['errors'])\n exception = DemistoException(ERROR_TITLES.get(error_code, '') + error_msg)\n\n except Exception:\n exception = DemistoException(f'Error in API call [{res.status_code}] - {res.reason}')\n\n raise exception",
"def raise_for_error(bapiret: BAPIReturnRFC, response) -> BAPIReturn:\n\n bapi_return = BAPIReturn(bapiret)\n if bapi_return.is_error:\n raise BAPIError(bapi_return, response)\n\n return bapi_return",
"def __CheckResponse(self, response):\n\n status = response.status\n if (status == httplib.OK or status == httplib.CREATED\n or status == httplib.NO_CONTENT):\n return\n elif (status == httplib.UNAUTHORIZED):\n raise BadCredentialsException\n elif (status == httplib.SERVICE_UNAVAILABLE):\n raise ServerBusyException\n elif (status == httplib.BAD_REQUEST\n or status == httplib.UNPROCESSABLE_ENTITY):\n raise BadArgumentsException\n elif (status == httplib.NOT_FOUND):\n raise NotFoundException\n else:\n raise BadOperationException",
"def check_response(response):\n for status_code, err_class, err_type in [\n (HTTPStatus.INTERNAL_SERVER_ERROR, ServerError, 'Server'),\n (HTTPStatus.BAD_REQUEST, ClientError, 'Client')\n ]: # highest http status code first\n if response.status_code >= status_code:\n try:\n status = HTTPStatus(response.status_code)\n except ValueError as err:\n m = re.search(r'\\d{3}', err.args[0], flags=re.ASCII)\n if not m:\n raise err\n msg = f'Generic {err_type} Error ({m.group()})'\n else:\n msg = f'({status}) {status.description}'\n\n raise err_class(msg)\n\n if response.status_code == HTTPStatus.OK \\\n and SERVER_DB_ERROR_MSG in response.text:\n raise ServerError('Server cannot access the database')",
"def _handle_api_error(ex):\n if request.path.startswith('/api/'):\n message, detail = str(ex).split(\": \")\n return jsonify(message=message, detail=detail), ex.code\n else:\n return ex",
"def validate_response(response: json):\n if \"error\" in response:\n print(\"ERROR: Request returned error\")\n print_request_response(response)\n exit(1)",
"async def handle_api_error(ctx: Context, e: ResponseCodeError) -> None:\n if e.status == 404:\n log.debug(f\"API responded with 404 for command {ctx.command}\")\n await ctx.send(\"There does not seem to be anything matching your query.\")\n ctx.bot.stats.incr(\"errors.api_error_404\")\n elif e.status == 400:\n log.error(\n \"API responded with 400 for command %s: %r.\",\n ctx.command,\n e.response_json or e.response_text,\n )\n await ctx.send(\"According to the API, your request is malformed.\")\n ctx.bot.stats.incr(\"errors.api_error_400\")\n elif 500 <= e.status < 600:\n log.warning(f\"API responded with {e.status} for command {ctx.command}\")\n await ctx.send(\"Sorry, there seems to be an internal issue with the API.\")\n ctx.bot.stats.incr(\"errors.api_internal_server_error\")\n else:\n log.warning(f\"Unexpected API response for command {ctx.command}: {e.status}\")\n await ctx.send(f\"Got an unexpected status code from the API (`{e.status}`).\")\n ctx.bot.stats.incr(f\"errors.api_error_{e.status}\")",
"def raise_for_response(self, response):\n try:\n code = response.errors[0][0]._code\n\n if code == 'invalidRecipient':\n raise InvalidRecipientException()\n elif code == 'recipientBlocked':\n raise RecipientBlockedException()\n elif code == 'emptyMessageContent':\n raise EmptyMessageContentException()\n elif code == 'other':\n raise OtherMMSOAPException()\n else:\n pass\n\n except AttributeError:\n pass",
"def _check_for_api_errors(geocoding_results):\n status_result = geocoding_results.get(\"STATUS\", {})\n api_call_success = status_result.get(\"status\", \"\") == \"SUCCESS\"\n if not api_call_success:\n access_error = status_result.get(\"access\")\n access_error_to_exception = {\n 'API_KEY_INVALID': GeocoderAuthenticationFailure,\n 'OVER_QUERY_LIMIT': GeocoderQuotaExceeded,\n }\n exception_cls = access_error_to_exception.get(\n access_error, GeocoderServiceError\n )\n raise exception_cls(access_error)",
"def _check_response_for_request_errors(self):\r\n if self.response.HighestSeverity == \"ERROR\":\r\n for notification in self.response.Notifications:\r\n if notification.Severity == \"ERROR\":\r\n if \"Invalid tracking number\" in notification.Message:\r\n raise FedexInvalidTrackingNumber(notification.Code,\r\n notification.Message)\r\n else:\r\n raise FedexError(notification.Code,\r\n notification.Message)",
"def handle_errors(resp: requests.Response):\n error_text = resp.text\n if isinstance(resp.text, bytes):\n try:\n error_text = error_text.decode(UTF_ENCODING)\n except UnicodeDecodeError:\n error_text = error_text.decode(\"iso-8859-1\")\n if error_text != \"\":\n _raise_error(error_text)\n resp.raise_for_status()",
"def _error_check(self, command_response):\n error_list = command_response.find(\"./clierror\")\n command_obj = command_response.find(\"./input\")\n if error_list is not None:\n command = command_obj.text if command_obj is not None else \"Unknown command\"\n msg = etree.tostring(error_list).decode()\n raise NXAPICommandError(command, msg)"
]
| [
"0.8030815",
"0.78007025",
"0.7488525",
"0.74808234",
"0.74602336",
"0.74405646",
"0.742001",
"0.7412156",
"0.72705823",
"0.7199109",
"0.7192201",
"0.71805555",
"0.71575356",
"0.71509993",
"0.71265465",
"0.7107663",
"0.70696384",
"0.70497006",
"0.70292777",
"0.6981177",
"0.69456804",
"0.6938655",
"0.69201344",
"0.69181716",
"0.69028",
"0.68763983",
"0.68475604",
"0.6837656",
"0.68275243",
"0.682284"
]
| 0.79649657 | 1 |
Find the first text between given pair of tags, returns both the text and position | def find_text_in_tag(st, tag):
if tag == "e1":
st = st.replace("<e2>", "")
st = st.replace("</e2>", "")
elif tag == "e2":
st = st.replace("<e1>", "")
st = st.replace("</e1>", "")
for i in range(len(st) - (len(tag)+2) + 1): # +2 is for < and >
if st[i:i+len(tag)+2] == "<" + tag + ">":
for j in range(i+1, len(st) - (len(tag)+3) + 1):
if st[j:j+len(tag)+3] == "</" + tag + ">":
return st[i+len(tag)+2:j], i - 1
print("ERROR: tag \"{}\" in string \"{}\" not found!".format(tag, st)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_first_contents(html, *tags):\n # return the stuff in between the first tag found or None\n x = get_contents(html, *tags)\n if not x:\n return None\n return x[0]",
"def find_start_end(text, start_text, end_text, start=0):\n # return (s, e) or None\n s = text.find(start_text, start)\n if s < 0:\n return None\n e = text.find(end_text, s+1)\n if e < 0:\n return None\n e += len(end_text)\n return s, e",
"def process_match(text, pos):\n m, _ = parse_ent('<' + text + '>', pos - len(text))\n return len(text) - len(m) + 2",
"def find_between(s, first='<title>', last='</title>'):\n try:\n start = s.index(first) + len(first)\n end = s.index(last, start)\n return s[start:end]\n except ValueError:\n return s",
"def get_text_by_tag(start, tagname, default=None):\n node_back = start.getElementsByTagName(tagname)[0]\n for node in node_back.childNodes:\n if node.nodeType == node.TEXT_NODE:\n return node.data\n\n return default",
"def find_text_between(start,end,haystack):\n found = re.search(start+'(.*)'+end,haystack,re.IGNORECASE | re.DOTALL)\n \n if found:\n return found.group(1).strip()\n else:\n raise Exception(\"There is no substring starting with '{}', ending\"\n \" with '{}' in content '{}' \".format(start,end,haystack))",
"def find_first_tag(self, tag):\n for lm, _ in self.search(tag=tag):\n return lm",
"def FIND(find_text, within_text, start_num=1):\n return within_text.index(find_text, start_num - 1) + 1",
"def get_offsets(word, raw_text):\n try:\n match = re.search(word, raw_text)\n return (match.start(), match.end())\n except AttributeError: #could not find word\n return (0, 0)",
"def between_markers(text: str, begin: str, end: str) -> str:\n # your code here\n return text[text.find(begin) + 1:text.find(end)]",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if not hasattr(node, 'first_token'):\n return (1, 0), (1, 0)\n\n start = node.first_token.start\n end = node.last_token.end\n if padded and any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):\n # Set col_offset to 0 to include leading indentation for multiline statements.\n start = (start[0], 0)\n\n return start, end",
"def get_word_postion(self, word: Word) -> Tuple[int, int]:\n text: str = self.to_text()\n words: List[Word] = self.get_words()\n current_position: int = 0\n\n for w in words:\n current_position = text.find(w.text, current_position)\n\n if w == word:\n return (current_position, current_position + len(w.text))\n return 0, 0",
"def between_markers(text: str, begin: str, end: str): # -> str\n begin_pos = text.find(begin)\n end_pos = text.find(end)\n \n if (begin_pos != -1 and end_pos != -1 and begin_pos < end_pos):\n return text[begin_pos + len(begin) : end_pos]\n elif (begin_pos == -1 and end_pos != -1):\n return text[0: end_pos]\n elif (begin_pos != -1 and end_pos == -1):\n return text[begin_pos + len(begin) :]\n elif(begin_pos == -1 and end_pos == -1):\n return text\n elif (begin_pos != -1 and end_pos != -1 and begin_pos > end_pos):\n return ''",
"def match_first_paragraph():\n html = \"<p>pybites != greedy</p>\" \"<p>not the same can be said REgarding ...</p>\"\n\n pattern = \"<p>(.+?)</p>\"\n text = html\n\n match = re.findall(pattern, text)\n return match[0]",
"def match_pair(expr, pair=(r'{', r'}'), start=0):\n\n beg = pair[0]\n fin = pair[1]\n\n # find first opening\n sstart = expr.find(beg, start)\n\n count = 0\n\n if beg == fin:\n eend = expr.find(fin, sstart + 1)\n return sstart, eend\n\n p = re.compile('(' + beg + '|' + fin + ')', re.M)\n ps = re.compile(beg, re.M)\n\n iterator = p.finditer(expr, start)\n\n for match in iterator:\n if ps.match(match.group()):\n count += 1\n else:\n count += -1\n\n if count == 0:\n return sstart, match.end()\n\n return None",
"def find_txt(s, d1=\"$\", d2=\"\\n\"):\n first=s.find(d1)\n second=s[first:].find(d2) + len(s[:first])\n return(s[first+len(d1):second])",
"def find_tag(tag_hash):\n for i in tags_fin:\n if tag_hash == i[2]:\n return i[1]",
"def get_tags_and_contents(html, *tags):\n # return the tags found, plus the stuff between them\n found = []\n for tag in tags:\n start, end = _make_tag_patterns(tag)\n i = 0\n while 1:\n m = re.search(start, html[i:], re.IGNORECASE|re.DOTALL)\n if not m:\n break\n s = m.start()+i\n \n m = re.search(end, html[s:], re.IGNORECASE|re.DOTALL)\n if not m:\n break\n e = m.end()+s\n \n found.append(html[s:e])\n i = e\n # This segfaults sometimes on Python 2.2!\n #reobj = re.compile(r\"%s.*?%s\" % (start, end), re.IGNORECASE|re.DOTALL)\n #found.extend(reobj.findall(html))\n return found",
"def find_text (node, tag):\n rc = \"\"\n n = node.find (\".//%s\" % tag)\n if n is not None:\n rc = n.text\n return rc",
"def __find_between(self, s, first, last):\n try:\n start = s.index(first) + len(first)\n end = s.index(last, start)\n return s[start:end]\n except ValueError:\n return \"\"",
"def textbetween(variable,\n firstnum=None,\n secondnum=None,\n locationoftext='regular'):\n if locationoftext == 'regular':\n return variable[firstnum:secondnum]\n elif locationoftext == 'toend':\n return variable[firstnum:]\n elif locationoftext == 'tostart':\n return variable[:secondnum]",
"def get_ellipsis_location(tree, target_tag):\n\n index = \"\".join(re.findall(r\"\\d+\", target_tag))\n tag = re.sub(index, \"\", target_tag)\n counter = 0\n for node in tree.subtrees():\n if node.label().split(\"end\")[0] == tag:\n if counter == int(index):\n return node.treeposition()\n else:\n counter += 1",
"def extract_line(line, pos):\n found = u''\n depth = 0\n offset = 0\n for c in line:\n if c == '<':\n depth += 1\n elif c == '>':\n depth -= 1\n if depth == 0:\n pos -= process_match(found[1:], pos + offset)\n found = u''\n\n if depth > 0:\n found += c\n\n offset += 1\n return pos + offset",
"def parse_ent(text, pos, typ='W'):\n logging.debug('parse_ent(%s, %d, %s)', text, pos, typ)\n if text[0] != '<':\n raise UserWarning('Entity does not start with \"<\"')\n idx = 1\n while is_category_char(text[idx]):\n logging.debug('Skipping category [%c]', text[idx])\n idx += 1\n logging.debug('After category skip: %d', idx)\n\n # Skip space\n if text[idx] == ' ':\n logging.debug('Skipping space after category')\n idx += 1\n match = u''\n this_level = 0\n\n while text[idx] != '>':\n logging.debug('Examining %d [%c]', idx, text[idx])\n if text[idx] == '<':\n sub, sl = parse_ent(text[idx:], pos + len(match), 'P')\n match += sub\n idx += sl + 1\n else:\n match += text[idx]\n idx += 1\n this_level += 1\n logging.debug('Found end at %d', idx)\n\n emit_entity(pos, typ, match)\n logging.debug('return [%s], %d', match, idx)\n return match, idx",
"def get_position(html_job_container):\n soup = BeautifulSoup(html_job_container, 'html.parser')\n job_position = soup.find_all(\n class_=\"jobLink jobInfoItem jobTitle\") # .get_text()\n if len(job_position) == 2:\n return job_position[1].get_text()\n return None",
"def find_first_block(text: str) -> (int, int, str):\n initial = text[0]\n if ishex(initial):\n start, end = find_first_number_block(text)\n typee = \"number\"\n elif initial == \"(\":\n start, end = find_first_parenthesis_block(text)\n typee = \"parenthesis\"\n else:\n start, end = 0, 0\n typee = \"operator\"\n return start, end, typee",
"def find(st, sub):\n\n if not sub: return None\n\n if sub[0] not in st.root.trans: return None\n\n i, s = 0, st.root\n while True:\n k, p, s = s.trans[sub[i]]\n len1, len2 = p-k+1, len(sub)-i\n if len1 >= len2:\n if st.text[k:k+len2] == sub[i:]: return k-i\n break\n else:\n if st.text[k:k+len1] == sub[i:i+len1]: i += len1\n else: break\n\n return None",
"def get_words_position(self, words: List[Word]) -> Tuple[int, int]:\n start: int = self.get_word_postion(words[0])[0]\n end: int = self.get_word_postion(words[-1])[1]\n return start, end",
"def search_sentence(target, sentences, tags, distance_evaluator=DistanceEvaluators.JACCARD):\n tag_id = 'VOID'\n best_sentence = ''\n best_distance = float('Infinity')\n x = list(zip(sentences, tags))\n for sentence, tag in zip(sentences, tags):\n #print(sentence)\n #print(\"\\n\\n\\n\" + tag)\n distance = distance_evaluator(sentence, target)\n if distance < best_distance:\n tag_id = tag\n best_sentence = sentence\n best_distance = distance\n return tag_id, best_sentence, best_distance",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n raise NotImplementedError"
]
| [
"0.65704864",
"0.64493614",
"0.64232105",
"0.6385036",
"0.6361685",
"0.62540066",
"0.6239112",
"0.6164878",
"0.61506945",
"0.61237997",
"0.5998363",
"0.5797227",
"0.5793289",
"0.57594854",
"0.57333",
"0.5728678",
"0.5694469",
"0.56718785",
"0.5658322",
"0.5646367",
"0.56407046",
"0.56337696",
"0.56014144",
"0.5599904",
"0.55807596",
"0.5572711",
"0.55648535",
"0.55437493",
"0.5530274",
"0.5527276"
]
| 0.73444617 | 0 |
Make a data.com specific auth http request. Adds additional headers. Raises BadAuthentication exception if the response is a 400 or 500-level response. | def auth_http_request(method, uri, **kwargs):
kwargs["headers"] = _get_datacom_headers(method, kwargs.get("headers"))
logger.debug("auth request headers: %s" % kwargs["headers"])
resp = make_http_request(method, uri, **kwargs)
if not resp.ok:
raise BadAuthentication(resp.status_code, resp.url, body=resp.content)
return resp | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_auth(self, http_request):\r\n pass",
"def __call__(self, request):\n self._logger.debug(f'__call__, {request.url} adding Authorization header')\n request.headers[\"Authorization\"] = self._get_auth_value()\n request.register_hook(\"response\", self._handle_401)\n return request",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})",
"def authenticate():\n return Response('Not Authorized', 401, {'WWW-Authenticate': 'Basic realm=\"api\"'})",
"def request_http_header( self ) -> dict:\n return {'content-type': 'application/json','Authorization':f'NLAuth nlauth_account={self._acct_number},nlauth_email={self._auth_email},nlauth_signature={self._acct_signature},nlauth_role=1090'}",
"def authenticate():\n return Response(\n 'Could not verify your credentials for that url', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def auth_handler(self, url, method, timeout, headers, data):\n username = self.username\n password = self.password\n return basic_auth_handler(url, method, timeout, headers, data, username,\n password)",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def authenticate():\n return Response(\n '', 401, {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )"
]
| [
"0.665069",
"0.6581979",
"0.6573325",
"0.65479815",
"0.6466093",
"0.6465924",
"0.6384124",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.6361779",
"0.63553095",
"0.63393646"
]
| 0.7463226 | 0 |
Install a mock Python script that can be called as an external program. text content of the script without the interpreter (!...) line path optional target path (including the script's file name) name optional script's file name, put it in a temporary directory on_path whether to make the script available on the binary search path (PATH) env optional environment variable to store the script path (e.g. PAGER) The path to the script is appended to a list of script to remove upon teardown. If environment variables are modified, their original state is saved so it may be reset after the test has run. Returns the path to the script. | def install(text, path=None, name=None, on_path=False, env=None):
if path is not None:
tmp_paths.append(path)
file_ = open(path, 'w')
elif name is not None:
directory = tempfile.mkdtemp()
tmp_paths.append(directory)
path = os.path.join(directory, name)
file_ = open(path, 'w')
else:
handle, path = tempfile.mkstemp()
tmp_paths.append(path)
file_ = os.fdopen(handle, 'w')
file_.write(HEAD % dict(
executable=sys.executable,
pythonpath=sys.path,
))
file_.write(text)
file_.close()
os.chmod(path, 0754)
if on_path:
original_environ.setdefault('PATH', os.environ['PATH'])
os.environ['PATH'] = ':'.join((directory, os.environ['PATH']))
if env is not None:
original_environ.setdefault(env, os.environ.get(env))
os.environ[env] = path
return path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pre_start_script_tmp_py(tmp_path: Path) -> Path:\n tmp_file = shutil.copy(Path(pre_start_module.__file__), tmp_path)\n return Path(tmp_file)",
"def pre_start_script_tmp_sh(tmp_path: Path) -> Path:\n tmp_file = tmp_path / \"prestart.sh\"\n with open(Path(tmp_file), \"x\") as f:\n f.write('echo \"Hello World, from a temporary pre-start shell script\"\\n')\n return Path(tmp_file)",
"def test_trialPathInsert(self):\n script = self.bin.child(\"trial\")\n if not script.exists():\n raise SkipTest(\"Script tests do not apply to installed configuration.\")\n cwd = getcwd()\n self.addCleanup(chdir, cwd)\n testDir = FilePath(self.mktemp())\n testDir.makedirs()\n chdir(testDir.path)\n testDir.child(\"foo.py\").setContent(\"\")\n output = outputFromPythonScript(script, \"foo\")\n self.assertIn(\"PASSED\", output)",
"def make_script(name, data, target, execpath=False):\n exec_path = join('/tmp', name + '-script')\n target_path = join(target, 'tmp', name + '-script')\n sfile = file(target_path, 'w')\n sfile.write(data.read())\n sfile.close()\n os.system('chmod 755 %s' % target_path)\n if not execpath:\n return target_path\n else:\n # for chroot target exec_path\n return exec_path",
"def getExecutableScript( executable, arguments=[], proxy=None, sandboxDict = {}, environDict={}, execDir='' ):\n if type( arguments) in types.StringTypes:\n arguments = arguments.split(' ')\n compressedAndEncodedFiles = {}\n if proxy:\n compressedAndEncodedFiles['.proxy'] = base64.encodestring( bz2.compress( proxy.dumpAllToString()['Value'] ) ).replace('\\n','')\n for fileName, filePath in sandboxDict.items():\n encodedFile = base64.encodestring( bz2.compress( open( filePath, \"rb\" ).read() ) ).replace('\\n','')\n compressedAndEncodedFiles[fileName] = encodedFile\n\n script = \"\"\"#!/usr/bin/env python\ntry:\n import os, tempfile, sys, shutil, base64, bz2, subprocess, datetime\nexcept:\n print 'Failed to import os, tempfile, sys, shutil, base64, bz2, subprocess'\n print 'Unsupported python version'\n exit(1)\n\nprint 'START TIME:', datetime.datetime(2000,1,1).utcnow(), 'UTC'\nprint\n\n# 1. Get Name of the executable\nexecutable = '%(executable)s'\ncmdTuple = [ executable ]\ncmdTuple.extend( %(arguments)s )\n\n# 2. Print environment\nprint '==========================================================='\nprint\nprint 'Existing Environment:'\nprint\nfor key, value in os.environ.items():\n print key, '=', value\nprint\nprint 'Added Environment:'\nprint\n\n# 3. Set environment\nenvironDict = %(environDict)s\nif 'LD_LIBRARY_PATH' not in os.environ and 'LD_LIBRARY_PATH' not in environDict:\n environDict['LD_LIBRARY_PATH'] = ''\n\nfor key, value in environDict.items():\n os.environ[key] = value\n print key, '=', value\n\n# 4. Create Working Directory\nexecDir = '%(execDir)s'\nif not execDir:\n execDir = None\nelse:\n execDir = os.path.expanduser( os.path.expandvars( execDir ) )\nworkingDirectory = tempfile.mkdtemp( suffix = 'pilot', prefix = 'DIRAC_', dir = execDir )\nos.chdir( workingDirectory )\nos.environ['X509_CERT_DIR'] = os.path.join( workingDirectory, 'etc', 'grid-security', 'certificates' )\n\n# 5. Extract Sandbox files\nfor fileName, fileCont in %(compressedAndEncodedFiles)s.items():\n f = open( fileName, 'w' )\n f.write( bz2.decompress( base64.decodestring( fileCont ) ) )\n f.close()\n if fileName == '.proxy':\n os.chmod( fileName, 0600 )\n os.environ['X509_USER_PROXY'] = os.path.join( workingDirectory, fileName )\n print 'X509_USER_PROXY', '=', os.path.join( workingDirectory, fileName )\n elif fileName == executable:\n os.chmod( fileName, 0755 )\n executable = './' + executable\nprint\nprint '==========================================================='\nprint\n\n# 6. Executing\ncmdTuple = [ os.path.expanduser( os.path.expandvars( k ) ) for k in cmdTuple ]\nprint 'Executing: ', ' '.join( cmdTuple )\nprint 'at:', os.getcwd()\nprint\nsys.stdout.flush()\ntry:\n exitCode = subprocess.call( cmdTuple )\n if exitCode < 0:\n print >> sys.stderr, 'Command killed by signal', - exitCode\n if exitCode > 0:\n print >> sys.stderr, 'Command returned', exitCode\nexcept OSError, e:\n exitCode = -1\n print >> sys.stderr, \"Execution failed:\", e\n\nshutil.rmtree( workingDirectory )\nmyDate = datetime.datetime(2000,1,1).utcnow()\nprint\nprint 'END TIME:', datetime.datetime(2000,1,1).utcnow(), 'UTC'\n\nexit( exitCode )\n\"\"\" % { 'execDir': execDir,\n 'executable': executable,\n 'compressedAndEncodedFiles': compressedAndEncodedFiles,\n 'arguments': arguments,\n 'environDict': environDict, }\n\n return script",
"def _execute(script, prefix=None, path=None):\n path = tempfile.gettempdir() if path is None else path\n result = 1\n try:\n fh = tempfile.NamedTemporaryFile('w', delete=False)\n fh.write(script)\n fh.close()\n print('Executing script below with cwd=%s\\n{{{\\n%s\\n}}}\\n' %\n (path, script))\n try:\n os.chmod(fh.name, stat.S_IRWXU)\n env = os.environ.copy()\n if prefix is not None:\n env['COLCON_BUNDLE_INSTALL_PREFIX'] = prefix\n result = subprocess.run(\n fh.name, cwd=path, env=env, stdout=PIPE, stderr=PIPE,\n universal_newlines=True)\n if result.stdout is not None:\n logger.debug('stdout output: \\n' + result.stdout)\n if result.stderr is not None:\n logger.warn('stderr output: \\n' + result.stderr)\n except OSError as ex:\n print('Execution failed with OSError: %s' % ex)\n finally:\n if os.path.exists(fh.name):\n os.remove(fh.name)\n logger.info('Return code was: %s' % result)\n return result.returncode == 0",
"def _install_ff_locally(self, path, ff_exe):\n\n if sys.platform.startswith('win'):\n # Windows: copy the whole tuntime\n copy_xul_runtime(op.dirname(ff_exe), path)\n else:\n # OSX / Linux: create a symlink to xul runtime exe\n os.mkdir(path)\n stub_exe = op.join(path, 'xulrunner')\n os.symlink(ff_exe, stub_exe)\n return stub_exe",
"def setup(path):\n if platform.system != \"Windows\":\n cwd = os.getcwd()\n\n os.chdir(path)\n\n os.system(\"sh setup.sh\")\n\n os.chdir(cwd)",
"def test_create_script(executable, expected):\n filename = os.path.join(tempfile.gettempdir(), \"hello_world\")\n text = \"echo 'Hello World'\"\n\n create_script(filename, text, executable)\n assert os.path.exists(filename)\n\n # Disabling because it doesn't work on Windows.\n # s = os.stat(filename)\n # assert s.st_mode == expected\n\n if os.path.exists(filename):\n os.remove(filename)",
"def _install():\n\tprint \"Preparing to install {} script.\".format(SCRIPT_NAME)\n\t\n\t#make sure there is a place to install the script to.\n\tif not \"SCRIPTS\" in os.environ:\n\t\tprint \"Please set SCRIPTS environment variable.\"\n\t\tsys.exit(1)\n\t\n\tscript_dir = os.environ[\"SCRIPTS\"]\n\t\n\t#check to see if already installed\n\tif _is_already_installed(script_dir):\n\t\tprint \"A version of {} is already installed.\".format(SCRIPT_NAME)\n\t\tprint \"Do you wish to overwrite it? [Y,n]\"\n\t\tif raw_input() != 'Y':\n\t\t\tprint \"Cancelling installation of {}.\".format(SCRIPT_NAME)\n\t\t\tsys.exit(0)\n\t\telse:\n\t\t\tprint \"Overwritting previously installed script {}.\".format(SCRIPT_NAME)\n\t\t\t_uninstall()\n\t\n\t#copy python sources into script directory\n\tnew_dir = os.path.join(script_dir, SCRIPT_NAME)\n\tshutil.copytree(\"src\", new_dir)\n\t\n\t#copy executable and add permissions\n\tfor name in EXEC_NAMES:\n\t\tos.system(\"sudo cp bin/{0} /bin/{0}\".format(name))\n\t\tos.system(\"sudo chmod +x /bin/{}\".format(name))",
"def setUpModule():\n # Create a temporary directory where we can create a fake notify-send\n # program that is guaranteed to exist and will run successfully, but\n # without actually bothering the user with interactive notifications.\n directory = tempfile.mkdtemp(prefix='rsync-system-backup-', suffix='-fake-path')\n TEMPORARY_DIRECTORIES.append(directory)\n fake_program = os.path.join(directory, 'notify-send')\n candidates = which('true')\n os.symlink(candidates[0], fake_program)\n # Add the directory to the $PATH.\n path = get_search_path()\n path.insert(0, directory)\n os.environ['PATH'] = os.pathsep.join(path)",
"def _prepend_remote_shell_script( self, script, remote_path, **put_kwargs ):\n with closing( StringIO( ) ) as out_file:\n with closing( StringIO( ) ) as in_file:\n get( remote_path=remote_path, local_path=in_file )\n in_file.seek( 0 )\n prepend_shell_script( '\\n' + script, in_file, out_file )\n out_file.seek( 0 )\n put( remote_path=remote_path, local_path=out_file, **put_kwargs )",
"def test_remote_sys_path(pytester: pytest.Pytester) -> None:\n pytester.makepyfile(\n \"\"\"\n import sys\n\n def test_sys_path():\n assert \"\" not in sys.path\n \"\"\"\n )\n result = pytester.runpytest(\"-n1\")\n assert result.ret == 0",
"def script_test(path):\n log.info(\" ... EXECUTING {}\".format(str(path)))\n\n cmd = [sys.executable, str(path)]\n cp = subprocess.run(cmd, stderr=subprocess.PIPE)\n if cp.returncode:\n log.info(\" ... FAILED\")\n log.info(\" ___ TRACEBACK\")\n log.info(cp.stderr.decode(\"utf-8\") + \"\\n\\n\")\n return False\n else:\n log.info(\" ... PASSED\")\n return True",
"def test_path_filename():\n mock_path = \"E:\\\\Repos\\\\pc-setup\\\\powershell\\\\provision_python.ps1\"\n output = sh.path_filename(mock_path)\n assert output == \"provision_python\"",
"def test_twistdPathInsert(self):\n script = self.bin.child(\"twistd\")\n if not script.exists():\n raise SkipTest(\"Script tests do not apply to installed configuration.\")\n cwd = getcwd()\n self.addCleanup(chdir, cwd)\n testDir = FilePath(self.mktemp())\n testDir.makedirs()\n chdir(testDir.path)\n testDir.child(\"bar.tac\").setContent(\"import sys\\n\" \"print sys.path\\n\")\n output = outputFromPythonScript(script, \"-ny\", \"bar.tac\")\n self.assertIn(repr(testDir.path), output)",
"def run_setup_script(self, script_path):\n try:\n f = open(script_path, 'r')\n setup_script = f.read()\n # print(setup_script)\n c = self.conn.cursor()\n c.executescript(setup_script)\n except (Error, IOError) as e:\n print('[Datanase] Error:')\n print(e)",
"def test_qt_creator_prebuild_script_cmd_line(self):\n for d in [app_zip_file(), libs_zip_file()]:\n self.assertFalse(os.path.exists(d))\n\n cmd = ['pydroid', 'qt_creator_prebuild_script', PROJECT_DIR]\n subprocess.call(cmd)\n\n for d in [app_zip_file(), libs_zip_file()]:\n self.assertTrue(os.path.exists(d))",
"def script_from_string(self, script: str) -> LocalScriptRunner:\n hasher = hashlib.md5(bytes(script, \"utf-8\"))\n script_name = hasher.hexdigest()\n\n path = pathlib.Path(self.tmp_script_dir.name, script_name)\n aligned_script = textwrap.dedent(script)\n path.write_text(aligned_script)\n return LocalScriptRunner(str(path))",
"def run_script(script_path, cwd='.'):\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n try:\n proc = subprocess.Popen(script_command, shell=run_thru_shell, cwd=cwd) # nosec\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n f'Hook script failed (exit status: {exit_status})'\n )\n except OSError as err:\n if err.errno == errno.ENOEXEC:\n raise FailedHookException(\n 'Hook script failed, might be an empty file or missing a shebang'\n ) from err\n raise FailedHookException(f'Hook script failed (error: {err})') from err",
"def runScript(path=None):\n if path:\n exec(compile(open(path, \"rb\").read(), path, 'exec'))",
"def create_bootstrap_script(scratch_dir):\n install_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"install\")\n shutil.copy(install_script, os.path.join(scratch_dir, \"install\"))",
"def setup(self, **kwargs):\n if self.bash_script:\n src = os.fspath(FILES / self.bash_script)\n dst = os.fspath(self.project_dir / self.bash_script)\n shutil.copy(src, dst)",
"def test_pre_hooks(self):\n os.makedirs('/tmp/localhost/pacha_pre')\n touch_script = open('/tmp/localhost/pacha_pre/foo.sh', 'w')\n touch_script.write('''touch /tmp/localhost/pre_got_executed.txt''')\n touch_script.close()\n run = rebuild.Rebuild(hostname='localhost') \n run.pre_hooks()\n self.assertTrue(os.path.isfile('/tmp/localhost/pre_got_executed.txt'))",
"async def test_script_main(config, mocker, monkeypatch, path_map_mock):\n for key in config.keys():\n monkeypatch.setenv(key, config[key])\n mock_event_loop = mocker.patch(\"asyncio.get_event_loop\")\n mock_root_logger = mocker.patch(\"logging.getLogger\")\n mock_status_loop = mocker.patch(\"lta.unpacker.status_loop\")\n mock_work_loop = mocker.patch(\"lta.unpacker.work_loop\")\n main()\n mock_event_loop.assert_called()\n mock_root_logger.assert_called()\n mock_status_loop.assert_called()\n mock_work_loop.assert_called()",
"def _register_test_script(self, qualified_name):\n desc = self._get_or_create_script('openerp_tester',\n name=qualified_name)[1]\n arguments = '%r, %r, version=%r, just_test=True' % (\n self._get_server_command(),\n self.config_path,\n self.major_version)\n arguments += ', gevent_script_path=%r' % self.gevent_script_path\n\n desc.update(\n entry='openerp_starter',\n initialization=os.linesep.join((\n \"from anybox.recipe.odoo import devtools\",\n \"devtools.load(for_tests=True)\",\n \"\")),\n arguments=arguments\n )",
"def attach_path(path):\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), path)",
"def test_execute_and_import():\n code = dedent('''\n import os\n print os.path\n ''')\n results = ExecuteCode.execute_code(code)\n\n assert results != None\n assert results != ''",
"def install_script_stored_on_remote(script_dir, script_name, mode=775, owner='root'):\n full_path = os.path.join(script_dir, script_name)\n\n with cd(script_dir):\n sudo(\"chmod {} {}\".format(mode, script_name))\n sudo(\"chown {} {}\".format(owner, script_name))\n sudo(\"ln -sf {} {}\".format(full_path, env.system_script_dir))",
"def do_dload_shim(executable_path, names, actions):\n argv = sys.argv\n r = runfiles.Create()\n # NOTE(hidmic): unlike its C++ equivalent, Python runfiles'\n # builtin tools will only look for runfiles in the manifest\n # if there is a manifest\n runfiles_dir = r.EnvVars()['RUNFILES_DIR']\n\n def rlocation(path):\n return r.Rlocation(path) or os.path.join(runfiles_dir, path)\n\n if SHIMMED_SENTINEL not in os.environ:\n for name, action in zip(names, actions): # noqa\n action_type, action_args = action[0], action[1:]\n if action_type == 'replace':\n assert len(action_args) == 1\n value = action_args[0]\n elif action_type == 'set-if-not-set':\n assert len(action_args) == 1\n if name in os.environ:\n continue\n value = action_args[0]\n elif action_type == 'path-replace':\n assert len(action_args) == 1\n value = rlocation(action_args[0])\n elif action_type == 'path-prepend':\n assert len(action_args) > 0\n value = ':'.join([rlocation(path) for path in action_args])\n if name in os.environ:\n value += ':' + os.environ[name]\n else:\n assert False # should never get here\n if '$PWD' in value:\n value = value.replace('$PWD', os.getcwd())\n os.environ[name] = value\n os.environ[SHIMMED_SENTINEL] = \"\"\n\n real_executable_path = r.Rlocation(executable_path) # noqa\n argv = [real_executable_path] + argv[1:]\n os.execv(real_executable_path, argv)"
]
| [
"0.62975913",
"0.62420475",
"0.5760374",
"0.56799525",
"0.5622587",
"0.550792",
"0.5498481",
"0.5466002",
"0.54401475",
"0.54278964",
"0.5374592",
"0.5362588",
"0.53110266",
"0.5258041",
"0.5247601",
"0.52420473",
"0.52388513",
"0.52318",
"0.52104723",
"0.5209367",
"0.51986456",
"0.51759934",
"0.5158359",
"0.5129942",
"0.5120297",
"0.51134735",
"0.5083342",
"0.5062293",
"0.50411016",
"0.5040878"
]
| 0.6845354 | 0 |
Scripts-related test teardown handler. Scripts created by the test are removed on teardown. The environment is reset to its state before the test run. | def teardown_scripts(test=None):
for key, value in original_environ.iteritems():
if value is None:
del os.environ[key]
else:
os.environ[key] = value
original_environ.clear()
for path in tmp_paths:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
del tmp_paths[:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tearDown(self):\n test_env_teardown()",
"def tearDown(self):\n tests.utils.cleanup_environment()",
"def tearDown(self):\n tests.utils.cleanup_environment()",
"def teardown_test_env():\n if not keep_tmp_dirs:\n print('\\nCleaning up temporary directories...')\n shutil.rmtree(tmp_elm_dpath, ignore_errors=True)\n shutil.rmtree(tmp_elm_examples_dpath, ignore_errors=True)\n\n print('Removing conda environment used for testing...')\n sp.call('conda env remove -y -q -n {}'.format(test_env_name), shell=True, executable='/bin/bash', stdout=sp.DEVNULL)",
"def tearDown(self) -> None:\n\n self.temp_env_file.close()\n os.remove(self.temp_env_file.name)\n\n del self.temp_env_file\n del self.test_name\n del self.helper",
"def teardown(self):\n del self.testInst, self.dname\n\n return",
"def teardown(self):\n pass",
"def teardown(self):\n pass",
"def teardown(self):\n pass",
"def teardown(request, exec_env):\n\n def fin():\n if exec_env.get_script_state() == \"RUNNING\":\n resp = exec_env.run_oet_command(\"stop\", \"--run_abort=False\")\n assert \"Successfully stopped\" in resp\n\n request.addfinalizer(fin)",
"def teardown_application(self):\n pass",
"def teardown(self) -> None:",
"def teardown(self) -> None:",
"def teardown(self) -> None:",
"def teardown(self) -> None:\n pass",
"def teardown(self) -> None:\n pass",
"def teardown_class(self):\n\n # TODO: If environment variable is set keep the workspace\n # and print out the path.\n global TEST_WORKSPACE\n\n check_env = env.import_test_cfg(TEST_WORKSPACE)[\n 'codechecker_cfg']['check_env']\n codechecker.remove_test_package_product(TEST_WORKSPACE, check_env)\n\n print(\"Removing: \" + TEST_WORKSPACE)\n shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)",
"def teardown_class(self):\n\n # TODO: If environment variable is set keep the workspace\n # and print out the path.\n global TEST_WORKSPACE\n\n check_env = env.import_test_cfg(TEST_WORKSPACE)[\n 'codechecker_cfg']['check_env']\n codechecker.remove_test_package_product(TEST_WORKSPACE, check_env)\n\n print(\"Removing: \" + TEST_WORKSPACE)\n shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)",
"def teardown_package():\n # TODO If environment variable is set keep the workspace\n # and print out the path.\n global TEST_WORKSPACE\n\n # Removing the product through this server requires credentials.\n codechecker_cfg = env.import_test_cfg(TEST_WORKSPACE)['codechecker_cfg']\n codechecker.remove_test_package_product(TEST_WORKSPACE,\n codechecker_cfg['check_env'])\n\n __STOP_SERVER.set()\n\n # The custom server stated in a separate home needs to be waited, so it\n # can properly execute its finalizers.\n time.sleep(5)\n\n print(\"Removing: \" + TEST_WORKSPACE)\n shutil.rmtree(TEST_WORKSPACE, ignore_errors=True)",
"def tearDown(self):\n self.testbed.deactivate()",
"def tearDown(self):\n self.testbed.deactivate()",
"def teardown(self):\n pass # pylint: disable=unnecessary-pass",
"def _teardown_dut(duthost, ptfhost, request):\n logger.info(\"Teardown SAI tests.\")\n _collect_test_result(duthost, ptfhost, request)\n _cleanup_ptf(ptfhost)",
"def teardown(self):",
"def teardown(self):",
"def teardown(self):",
"def _teardown(self):\n # No-op base implementation",
"def tearDownClass(cls):\n for testfile in [cls.testfile, cls.testyfile, cls.testbrfile, cls.testlog]:\n if os.path.exists(testfile):\n os.remove(testfile)\n\n for e in cls.origEnv:\n if cls.origEnv[e] is None:\n del os.environ[e]\n else:\n os.environ[e] = cls.origEnv[e]\n\n if os.path.exists(cls.testDir):\n rmtree(cls.testDir)",
"def _tear_down():\n repl._tearDown = self.tearDown",
"def tearDown(self):\n self.teardown_beets()"
]
| [
"0.7965651",
"0.7953725",
"0.7953725",
"0.78178906",
"0.74833065",
"0.73717785",
"0.7350228",
"0.7350228",
"0.7350228",
"0.73106354",
"0.7263392",
"0.7243588",
"0.7243588",
"0.7243588",
"0.72310805",
"0.72310805",
"0.71890324",
"0.71890324",
"0.71752936",
"0.7117237",
"0.7117237",
"0.7110318",
"0.70893824",
"0.7084381",
"0.7084381",
"0.7084381",
"0.7083347",
"0.7063411",
"0.7059421",
"0.70553756"
]
| 0.79659957 | 0 |
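
The final record above pairs a teardown description with a handler that restores `os.environ` and deletes temporary paths. Below is a minimal, hypothetical sketch (not taken from any record) of how such a handler is commonly wired into a test run, assuming pytest; the fixture name, the `SCRIPT_HOME` variable, and the module-level `_original_environ`/`_tmp_paths` globals are illustrative stand-ins.

```python
# Illustrative sketch only: registering a teardown handler, similar to the
# one in the last record, as a pytest fixture. Names are hypothetical.
import os
import shutil
import tempfile

import pytest

_original_environ = {}  # environment values saved before the test mutates them
_tmp_paths = []         # files and directories created during the test


@pytest.fixture
def script_env():
    """Yield a scratch directory, then undo environment and filesystem changes."""
    _original_environ["SCRIPT_HOME"] = os.environ.get("SCRIPT_HOME")
    scratch = tempfile.mkdtemp()
    _tmp_paths.append(scratch)
    os.environ["SCRIPT_HOME"] = scratch
    yield scratch
    # Restore the environment to its pre-test state.
    for key, value in _original_environ.items():
        if value is None:
            os.environ.pop(key, None)
        else:
            os.environ[key] = value
    _original_environ.clear()
    # Remove anything the test left on disk.
    for path in _tmp_paths:
        if os.path.isdir(path):
            shutil.rmtree(path)
        elif os.path.exists(path):
            os.remove(path)
    del _tmp_paths[:]


def test_script_home_is_isolated(script_env):
    assert os.environ["SCRIPT_HOME"] == script_env
```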