Dataset columns:

column           type             range
query            stringlengths    9 to 9.05k
document         stringlengths    10 to 222k
metadata         dict
negatives        sequencelengths  30 to 30
negative_scores  sequencelengths  30 to 30
document_score   stringlengths    4 to 10
document_rank    stringclasses    2 values
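The metadata field's triplet objective pairs each query with its positive document and the 30 mined negatives, which suggests these rows are intended for contrastive training. Below is a minimal sketch of how one row could be consumed with the Hugging Face datasets library; the repository path is a hypothetical placeholder, not the dataset's real name.

from datasets import load_dataset

# Hypothetical repository path -- substitute the real dataset name.
ds = load_dataset("org/code-search-triplets", split="train")
row = ds[0]

query = row["query"]                      # natural-language / docstring query
positive = row["document"]                # matching code snippet
negatives = row["negatives"]              # 30 hard negatives
negative_scores = row["negative_scores"]  # 30 retrieval scores (shown as strings in this dump)

print(query)
print(len(negatives), len(negative_scores))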
Register HTR (mainnet) and XHTR (testnet) in pycoin networks
def _register_pycoin_networks() -> None:
    import os
    global _registered_pycoin
    if _registered_pycoin:
        return
    _registered_pycoin = True
    paths = os.environ.get('PYCOIN_NETWORK_PATHS', '').split()
    if 'hathor.pycoin' not in paths:
        paths.append('hathor.pycoin')
    os.environ['PYCOIN_NETWORK_PATHS'] = ' '.join(paths)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_register_network(self):\n pass", "def register_network(key, module):\n register(key, module, network_dict)", "def register_networks(backend):\n from .network_desc import NetworkDesc\n from .adelaide import AdelaideFastNAS\n from .erdb_esr import ESRN\n from .mobilenet import MobileNetV3Tiny, MobileNetV2Tiny\n from .mobilenetv3 import MobileNetV3Small, MobileNetV3Large\n from .sgas_network import SGASNetwork\n from .necks import FPN\n from .bert import BertClassifier\n from . import resnet\n from . import quant\n from . import mtm_sr\n from . import super_network\n from . import resnet_det\n from . import resnet_general\n from . import resnext_det\n from . import xt_model\n from . import text_cnn\n from . import faster_rcnn\n if backend == \"pytorch\":\n from . import spnet_backbone\n from . import faster_backbone\n from . import pytorch\n elif backend == \"tensorflow\":\n from . import spnet_backbone\n from . import faster_backbone\n from . import tensorflow\n elif backend == \"mindspore\":\n from . import mindspore", "def network(c, with_friendbot=False):\n if with_friendbot:\n start_friendbot(c)\n\n base_reserve_0()\n protocol_version_9()\n tx_set_size_500()\n create_whitelist_account()\n\n print('Root account seed: {}'.format(derive_root_account_seed(PASSPHRASE)))\n print('Network is up')", "def test_coins(self):\n\n coins = {\n \"btc\": {\n \"backend\": {\n \"class\": \"cryptoassets.core.backend.bitcoind.Bitcoind\",\n \"url\": \"http://foo:[email protected]:8332/\",\n }\n }\n }\n\n coin_registry = self.configurator.setup_coins(coins)\n\n self.assertIsInstance(coin_registry.get(\"btc\"), Coin)\n coin = coin_registry.get(\"btc\")\n\n self.assertIsInstance(coin.backend, Bitcoind)\n self.assertEqual(coin.wallet_model, BitcoinWallet)", "def test_add_network(self):\n pass", "def onRegisterNetworkNode(self):\n pass", "def __init__(self, tx, testnet=False):\n SelectParams(\"testnet\" if testnet else \"mainnet\")\n self.tx = tx\n self.log = Logger(system=self)", "def push_tx(tx, network='testnet', fee=False):\n\n if network in ['testnet', 'main']:\n if network is 'testnet':\n if fee:\n url = 'http://tbtc.blockr.io/api/v1/tx/push'\n else:\n url = 'https://api.blockcypher.com/v1/btc/test3/txs/push'\n elif network is 'main':\n if fee:\n url = 'http://btc.blockr.io/api/v1/tx/push'\n else:\n url = 'https://api.blockcypher.com/v1/btc/main/txs/push'\n\n if fee:\n data = {'hex': tx}\n else:\n data = {'tx': tx}\n\n response = post(url, data=json.dumps(data))\n else:\n response = 'Bad network'\n\n r_code = response.status_code\n r_reason = response.reason\n\n if r_code is 200:\n # blockr server\n pushed_tx = json.loads(response.content)\n tx_hash = str(pushed_tx['data'])\n elif r_code is 201:\n # blockcyper server\n pushed_tx = json.loads(response.content)\n tx_hash = str(pushed_tx['tx']['hash'])\n else:\n tx_hash = None\n\n return r_code, r_reason, tx_hash", "def startup( self ):\n # ---- Setup UPNPC ----\n if self.config.neuron.use_upnpc:\n bittensor.logging.success(prefix = 'Set upnpc', sufix = '<green>ON</green>')\n try:\n self.external_port = net.upnpc_create_port_map( port = self.axon.port )\n except net.UPNPCException as upnpc_exception:\n logger.critical('Failed to hole-punch with upnpc')\n raise RuntimeError('Failed to hole-punch with upnpc')\n else:\n bittensor.logging.success(prefix = 'Set upnpc', sufix = '<red>OFF</red>')\n self.external_port = self.config.axon.port\n\n # ---- Get external ip ----\n try:\n self.external_ip = net.get_external_ip()\n bittensor.logging.success(prefix 
= 'External IP', sufix = '<blue>{}</blue>'.format(self.external_ip))\n except net.ExternalIPNotFound as external_port_exception:\n raise RuntimeError('Unable to attain your external ip. Check your internet connection. error:{}', external_port_exception)\n\n # ---- Setup tensorboard ----\n if self.config.neuron.use_tensorboard == True:\n self._tensorboard_program = program.TensorBoard()\n self._tensorboard_program.configure(argv=[None, '--logdir', self.config.neuron.full_path, '--load_fast=true'])\n self._tensorbaord_url = self._tensorboard_program.launch()\n bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<blue>http://localhost:6006/</blue>')\n else: bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<red>OFF</red>')\n\n # ---- Setup Wallet. ----\n if not self.wallet.has_coldkeypub:\n self.wallet.create_new_coldkey( n_words = 12, use_password = True )\n if not self.wallet.has_coldkeypub:\n raise RuntimeError('Miner must have access to a decrypted coldkeypub')\n if not self.wallet.has_hotkey:\n self.wallet.create_new_hotkey( n_words = 12, use_password = False )\n if not self.wallet.has_hotkey:\n raise RuntimeError('Miner must have access to a decrypted hotkey')\n\n # ---- Subscribe to chain ----\n subscribe_success = self.subtensor.subscribe(\n wallet = self.wallet,\n ip = self.external_ip,\n port = self.external_port,\n modality = bittensor.proto.Modality.TEXT,\n wait_for_finalization = True,\n timeout = 4 * bittensor.__blocktime__,\n )\n if not subscribe_success:\n raise RuntimeError('Failed to subscribe neuron.')\n\n # ---- Starting axon ----\n self.axon.start()", "def setup_net(self):\n pass", "def web3(chain):\n return Web3(EthereumTesterProvider(chain))", "def chain():\n return eth_tester.EthereumTester(eth_tester.PyEVMBackend())", "async def test_complex_nft_offer(\n self_hostname: str, two_wallet_nodes: Any, trusted: Any, royalty_pts: Tuple[int, int, int]\n) -> None:\n full_nodes, wallets, _ = two_wallet_nodes\n full_node_api: FullNodeSimulator = full_nodes[0]\n full_node_server = full_node_api.server\n wallet_node_maker, server_0 = wallets[0]\n wallet_node_taker, server_1 = wallets[1]\n wsm_maker = wallet_node_maker.wallet_state_manager\n wsm_taker = wallet_node_taker.wallet_state_manager\n wallet_maker = wsm_maker.main_wallet\n wallet_taker = wsm_taker.main_wallet\n\n ph_maker = await wallet_maker.get_new_puzzlehash()\n ph_taker = await wallet_taker.get_new_puzzlehash()\n ph_token = bytes32(token_bytes())\n if trusted:\n wallet_node_maker.config[\"trusted_peers\"] = {\n full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()\n }\n wallet_node_taker.config[\"trusted_peers\"] = {\n full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()\n }\n else:\n wallet_node_maker.config[\"trusted_peers\"] = {}\n wallet_node_taker.config[\"trusted_peers\"] = {}\n wallet_node_maker.config[\"automatically_add_unknown_cats\"] = True\n wallet_node_taker.config[\"automatically_add_unknown_cats\"] = True\n\n await server_0.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)\n await server_1.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)\n\n # Need money for fees and offering\n for i in range(0, 2):\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_maker))\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_taker))\n blocks_needed = 3\n for i in range(blocks_needed):\n await 
full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_taker))\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_token))\n await full_node_api.wait_for_wallets_synced(wallet_nodes=[wallet_node_maker, wallet_node_taker], timeout=30)\n\n funds_maker = sum([calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, 3)])\n funds_taker = sum(\n [\n calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))\n for i in range(1, 3 + blocks_needed)\n ]\n )\n\n await time_out_assert(30, wallet_maker.get_unconfirmed_balance, funds_maker)\n await time_out_assert(30, wallet_maker.get_confirmed_balance, funds_maker)\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n CAT_AMOUNT = uint64(100000000)\n async with wsm_maker.lock:\n cat_wallet_maker: CATWallet = await CATWallet.create_new_cat_wallet(\n wsm_maker, wallet_maker, {\"identifier\": \"genesis_by_id\"}, CAT_AMOUNT, DEFAULT_TX_CONFIG\n )\n async with wsm_maker.lock:\n cat_wallet_taker: CATWallet = await CATWallet.create_new_cat_wallet(\n wsm_taker, wallet_taker, {\"identifier\": \"genesis_by_id\"}, CAT_AMOUNT, DEFAULT_TX_CONFIG\n )\n cat_spend_bundle_maker = (\n await wallet_node_maker.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(wallet_maker.id())\n )[0].spend_bundle\n cat_spend_bundle_taker = (\n await wallet_node_taker.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(wallet_taker.id())\n )[0].spend_bundle\n await time_out_assert_not_none(\n 5, full_node_api.full_node.mempool_manager.get_spendbundle, cat_spend_bundle_maker.name()\n )\n await time_out_assert_not_none(\n 5, full_node_api.full_node.mempool_manager.get_spendbundle, cat_spend_bundle_taker.name()\n )\n\n # We'll need these later\n basic_nft_wallet_maker = await NFTWallet.create_new_nft_wallet(wsm_maker, wallet_maker, name=\"NFT WALLET MAKER\")\n basic_nft_wallet_taker = await NFTWallet.create_new_nft_wallet(wsm_taker, wallet_taker, name=\"NFT WALLET TAKER\")\n\n did_wallet_maker: DIDWallet = await DIDWallet.create_new_did_wallet(wsm_maker, wallet_maker, uint64(1))\n did_wallet_taker: DIDWallet = await DIDWallet.create_new_did_wallet(wsm_taker, wallet_taker, uint64(1))\n did_spend_bundle_maker = (\n await wallet_node_maker.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(did_wallet_maker.id())\n )[0].spend_bundle\n did_spend_bundle_taker = (\n await wallet_node_taker.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(did_wallet_taker.id())\n )[0].spend_bundle\n\n await time_out_assert_not_none(\n 5, full_node_api.full_node.mempool_manager.get_spendbundle, did_spend_bundle_maker.name()\n )\n await time_out_assert_not_none(\n 5, full_node_api.full_node.mempool_manager.get_spendbundle, did_spend_bundle_taker.name()\n )\n\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_token))\n\n funds_maker = funds_maker - 1 - CAT_AMOUNT\n funds_taker = funds_taker - 1 - CAT_AMOUNT\n\n await time_out_assert(30, wallet_maker.get_unconfirmed_balance, funds_maker)\n await time_out_assert(30, wallet_maker.get_confirmed_balance, funds_maker)\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n await time_out_assert(30, cat_wallet_maker.get_confirmed_balance, CAT_AMOUNT)\n await time_out_assert(30, cat_wallet_maker.get_unconfirmed_balance, CAT_AMOUNT)\n await 
time_out_assert(30, cat_wallet_taker.get_confirmed_balance, CAT_AMOUNT)\n await time_out_assert(30, cat_wallet_taker.get_unconfirmed_balance, CAT_AMOUNT)\n did_id_maker = bytes32.fromhex(did_wallet_maker.get_my_DID())\n did_id_taker = bytes32.fromhex(did_wallet_taker.get_my_DID())\n target_puzhash_maker = ph_maker\n target_puzhash_taker = ph_taker\n royalty_puzhash_maker = ph_maker\n royalty_puzhash_taker = ph_taker\n royalty_basis_pts_maker, royalty_basis_pts_taker_1, royalty_basis_pts_taker_2 = (\n royalty_pts[0],\n uint16(royalty_pts[1]),\n uint16(royalty_pts[2]),\n )\n\n nft_wallet_maker = await NFTWallet.create_new_nft_wallet(\n wallet_node_maker.wallet_state_manager, wallet_maker, name=\"NFT WALLET DID 1\", did_id=did_id_maker\n )\n nft_wallet_taker = await NFTWallet.create_new_nft_wallet(\n wallet_node_taker.wallet_state_manager, wallet_taker, name=\"NFT WALLET DID 1\", did_id=did_id_taker\n )\n metadata = Program.to(\n [\n (\"u\", [\"https://www.chia.net/img/branding/chia-logo.svg\"]),\n (\"h\", \"0xD4584AD463139FA8C0D9F68F4B59F185\"),\n ]\n )\n if royalty_basis_pts_maker > 65535:\n with pytest.raises(ValueError):\n await nft_wallet_maker.generate_new_nft(\n metadata,\n DEFAULT_TX_CONFIG,\n target_puzhash_maker,\n royalty_puzhash_maker,\n royalty_basis_pts_maker, # type: ignore\n did_id_maker,\n )\n return\n else:\n sb_maker = await nft_wallet_maker.generate_new_nft(\n metadata,\n DEFAULT_TX_CONFIG,\n target_puzhash_maker,\n royalty_puzhash_maker,\n uint16(royalty_basis_pts_maker),\n did_id_maker,\n )\n\n sb_taker_1 = await nft_wallet_taker.generate_new_nft(\n metadata,\n DEFAULT_TX_CONFIG,\n target_puzhash_taker,\n royalty_puzhash_taker,\n royalty_basis_pts_taker_1,\n did_id_taker,\n )\n assert sb_maker is not None\n assert sb_taker_1 is not None\n await time_out_assert_not_none(10, full_node_api.full_node.mempool_manager.get_spendbundle, sb_maker.name())\n await time_out_assert_not_none(10, full_node_api.full_node.mempool_manager.get_spendbundle, sb_taker_1.name())\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_token))\n\n funds_maker -= 1\n funds_taker -= 1\n\n await time_out_assert(30, wallet_maker.get_unconfirmed_balance, funds_maker)\n await time_out_assert(30, wallet_maker.get_confirmed_balance, funds_maker)\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n await time_out_assert(30, get_nft_count, 1, nft_wallet_maker)\n await time_out_assert(30, get_nft_count, 1, nft_wallet_taker)\n\n # MAke one more NFT for the taker\n sb_taker_2 = await nft_wallet_taker.generate_new_nft(\n metadata,\n DEFAULT_TX_CONFIG,\n target_puzhash_taker,\n royalty_puzhash_taker,\n royalty_basis_pts_taker_2,\n did_id_taker,\n )\n assert sb_taker_2 is not None\n await time_out_assert_not_none(10, full_node_api.full_node.mempool_manager.get_spendbundle, sb_taker_2.name())\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_token))\n\n funds_taker -= 1\n\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n await time_out_assert(30, get_nft_count, 2, nft_wallet_taker)\n\n trade_manager_maker = wsm_maker.trade_manager\n trade_manager_taker = wsm_taker.trade_manager\n maker_nfts = await nft_wallet_maker.get_current_nfts()\n taker_nfts = await nft_wallet_taker.get_current_nfts()\n nft_to_offer_asset_id_maker: bytes32 = maker_nfts[0].nft_id\n 
nft_to_offer_asset_id_taker_1: bytes32 = taker_nfts[0].nft_id\n nft_to_offer_asset_id_taker_2: bytes32 = taker_nfts[1].nft_id\n if royalty_basis_pts_maker > 60000:\n XCH_REQUESTED = 20000\n CAT_REQUESTED = 1000\n FEE = uint64(20000)\n else:\n XCH_REQUESTED = 2000000000000\n CAT_REQUESTED = 100000\n FEE = uint64(2000000000000)\n\n complex_nft_offer = {\n nft_to_offer_asset_id_maker: -1,\n cat_wallet_maker.id(): CAT_REQUESTED * -1,\n 1: XCH_REQUESTED,\n nft_to_offer_asset_id_taker_1: 1,\n nft_to_offer_asset_id_taker_2: 1,\n bytes32.from_hexstr(cat_wallet_taker.get_asset_id()): CAT_REQUESTED,\n }\n\n driver_dict = {\n nft_to_offer_asset_id_taker_1: match_puzzle(uncurry_puzzle(taker_nfts[0].full_puzzle)),\n nft_to_offer_asset_id_taker_2: match_puzzle(uncurry_puzzle(taker_nfts[1].full_puzzle)),\n bytes32.from_hexstr(cat_wallet_taker.get_asset_id()): PuzzleInfo(\n {\n \"type\": \"CAT\",\n \"tail\": \"0x\" + cat_wallet_taker.get_asset_id(),\n }\n ),\n }\n\n success, trade_make, error = await trade_manager_maker.create_offer_for_ids(\n complex_nft_offer, DEFAULT_TX_CONFIG, driver_dict=driver_dict, fee=FEE\n )\n assert error is None\n assert success\n assert trade_make is not None\n\n if royalty_basis_pts_maker == 10000:\n with pytest.raises(ValueError):\n trade_take, tx_records = await trade_manager_taker.respond_to_offer(\n Offer.from_bytes(trade_make.offer),\n wallet_node_taker.get_full_node_peer(),\n DEFAULT_TX_CONFIG,\n fee=FEE,\n )\n # all done for this test\n return\n else:\n trade_take, tx_records = await trade_manager_taker.respond_to_offer(\n Offer.from_bytes(trade_make.offer),\n wallet_node_taker.get_full_node_peer(),\n DEFAULT_TX_CONFIG,\n fee=FEE,\n )\n assert trade_take is not None\n assert tx_records is not None\n await full_node_api.process_transaction_records(records=tx_records)\n\n # Now let's make sure the final wallet state is correct\n maker_royalty_summary = NFTWallet.royalty_calculation(\n {\n nft_to_offer_asset_id_maker: (royalty_puzhash_maker, uint16(royalty_basis_pts_maker)),\n },\n {\n None: uint64(XCH_REQUESTED),\n bytes32.from_hexstr(cat_wallet_taker.get_asset_id()): uint64(CAT_REQUESTED),\n },\n )\n taker_royalty_summary = NFTWallet.royalty_calculation(\n {\n nft_to_offer_asset_id_taker_1: (royalty_puzhash_taker, royalty_basis_pts_taker_1),\n nft_to_offer_asset_id_taker_2: (royalty_puzhash_taker, royalty_basis_pts_taker_2),\n },\n {\n bytes32.from_hexstr(cat_wallet_maker.get_asset_id()): uint64(CAT_REQUESTED),\n },\n )\n maker_xch_royalties_expected = maker_royalty_summary[nft_to_offer_asset_id_maker][0][\"amount\"]\n maker_cat_royalties_expected = maker_royalty_summary[nft_to_offer_asset_id_maker][1][\"amount\"]\n taker_cat_royalties_expected = (\n taker_royalty_summary[nft_to_offer_asset_id_taker_1][0][\"amount\"]\n + taker_royalty_summary[nft_to_offer_asset_id_taker_2][0][\"amount\"]\n )\n funds_maker = int(funds_maker - FEE + XCH_REQUESTED + maker_xch_royalties_expected)\n funds_taker = int(funds_taker - FEE - XCH_REQUESTED - maker_xch_royalties_expected)\n\n await time_out_assert(30, wallet_maker.get_unconfirmed_balance, funds_maker)\n await time_out_assert(30, wallet_maker.get_confirmed_balance, funds_maker)\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n\n async def get_cat_wallet_and_check_balance(asset_id: str, wsm: Any) -> uint128:\n cat_wallet = await wsm.get_wallet_for_asset_id(asset_id)\n if cat_wallet is None:\n return uint128(0)\n else:\n return 
uint128(await cat_wallet.get_confirmed_balance())\n\n taker_cat_funds_maker = CAT_REQUESTED + maker_cat_royalties_expected\n maker_cat_funds_taker = CAT_REQUESTED + taker_cat_royalties_expected\n await time_out_assert(\n 30,\n get_cat_wallet_and_check_balance,\n taker_cat_funds_maker,\n cat_wallet_taker.get_asset_id(),\n wsm_maker,\n )\n await time_out_assert(\n 30,\n get_cat_wallet_and_check_balance,\n maker_cat_funds_taker,\n cat_wallet_maker.get_asset_id(),\n wsm_taker,\n )\n maker_nfts = await basic_nft_wallet_maker.get_current_nfts()\n taker_nfts = await basic_nft_wallet_taker.get_current_nfts()\n assert len(maker_nfts) == 2\n assert len(taker_nfts) == 1\n\n assert nft_to_offer_asset_id_maker == taker_nfts[0].nft_id\n assert nft_to_offer_asset_id_taker_1 in [nft.nft_id for nft in maker_nfts]\n assert nft_to_offer_asset_id_taker_2 in [nft.nft_id for nft in maker_nfts]\n\n # Try another permutation\n complex_nft_offer = {\n cat_wallet_maker.id(): CAT_REQUESTED * -1,\n 1: int(XCH_REQUESTED / 2),\n bytes32.from_hexstr(cat_wallet_taker.get_asset_id()): CAT_REQUESTED,\n nft_to_offer_asset_id_maker: 1,\n }\n\n driver_dict = {\n nft_to_offer_asset_id_maker: match_puzzle(uncurry_puzzle(taker_nfts[0].full_puzzle)),\n bytes32.from_hexstr(cat_wallet_taker.get_asset_id()): PuzzleInfo(\n {\n \"type\": \"CAT\",\n \"tail\": \"0x\" + cat_wallet_taker.get_asset_id(),\n }\n ),\n }\n\n success, trade_make, error = await trade_manager_maker.create_offer_for_ids(\n complex_nft_offer, DEFAULT_TX_CONFIG, driver_dict=driver_dict, fee=uint64(0)\n )\n assert error is None\n assert success\n assert trade_make is not None\n\n trade_take, tx_records = await trade_manager_taker.respond_to_offer(\n Offer.from_bytes(trade_make.offer),\n wallet_node_taker.get_full_node_peer(),\n DEFAULT_TX_CONFIG,\n fee=uint64(0),\n )\n assert trade_take is not None\n assert tx_records is not None\n await time_out_assert(20, mempool_not_empty, True, full_node_api)\n\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph_token))\n\n # Now let's make sure the final wallet state is correct\n funds_maker = int(funds_maker + XCH_REQUESTED / 2)\n funds_taker = int(funds_taker - XCH_REQUESTED / 2)\n\n await time_out_assert(30, wallet_maker.get_unconfirmed_balance, funds_maker)\n await time_out_assert(30, wallet_maker.get_confirmed_balance, funds_maker)\n await time_out_assert(30, wallet_taker.get_unconfirmed_balance, funds_taker)\n await time_out_assert(30, wallet_taker.get_confirmed_balance, funds_taker)\n\n await time_out_assert(\n 30,\n get_cat_wallet_and_check_balance,\n taker_cat_funds_maker + CAT_REQUESTED,\n cat_wallet_taker.get_asset_id(),\n wsm_maker,\n )\n await time_out_assert(\n 30,\n get_cat_wallet_and_check_balance,\n maker_cat_funds_taker + CAT_REQUESTED,\n cat_wallet_maker.get_asset_id(),\n wsm_taker,\n )\n await time_out_assert(20, get_nft_count, 3, basic_nft_wallet_maker)\n await time_out_assert(20, get_nft_count, 0, basic_nft_wallet_taker)\n assert await basic_nft_wallet_maker.nft_store.get_nft_by_id(nft_to_offer_asset_id_maker) is not None", "def create_testnet_funded_wallet(self, name: str) -> bool:\n wallet = self.new(name)\n try:\n wallet.network = \"TEST\"\n wallet.activate_through_friendbot()\n wallet.add_known_trustline(\"TFT\")\n wallet.save()\n except Exception as e:\n self.delete(name)\n raise j.exceptions.Runtime(f\"Failed to create the wallet due to token service\")\n\n headers = {\"Content-Type\": \"application/json\"}\n url = \"https://gettft.testnet.grid.tf/tft_faucet/api/transfer\"\n data 
= {\"destination\": wallet.address}\n\n try:\n response = requests.post(url, json=data, headers=headers)\n except requests.exceptions.HTTPError:\n self.delete(name)\n raise j.exceptions.Runtime(\n f\"Failed to fund wallet can't reach {url} due to connection error. Changes will be reverted.\"\n )\n\n if response.status_code != 200:\n self.delete(name)\n raise j.exceptions.Runtime(\n f\"Failed to fund the wallet due to to facuet server error. Changes will be reverted.\"\n )\n\n j.logger.info(\"Wallet created successfully\")\n return True", "def init(count, neutrino, uri):\n\n if not os.path.exists(root):\n os.makedirs(root)\n\n click.echo('starting btcd')\n start_btcd()\n \n for index in range(0, count):\n node = Node.from_index(index)\n start_lnd(node, neutrino, uri)\n wait_for_file(node.cert())\n init_lnd(node)\n\n first_node = Node.from_index(0)\n wait_for_file(first_node.macaroon())\n lndconnect_node(first_node)\n \n if count > 1:\n mining_node = Node.from_index(1)\n wait_for_file(mining_node.macaroon())\n _set_mining_node(mining_node)\n time.sleep(4)\n # We need at least 100 blocks because coinbase funds can’t be spent until after 100 \n # confirmations, and we need about 300 to activate segwit.\n _block(400)", "def init_host(self, host):\n self._precreate_network()\n LOG.info(_LI(\"Create/Update Ntwork and Subnet, Done.\"))", "def setup(bot):\n\tbot.add_cog(Wallet(bot))", "def __init__( self, **params ):\n \n host = custom( CPULimitedHost, cpu=cpuShare() ) \n link = custom( TCLink, bw=args.bandwidth, delay=delay() )\n \n Mininet.__init__(\n self,\n topo=BarrierTransactionTopo( **params ),\n host=host,\n link=link )", "def test_create_network_and_subnet(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 254\n self.__create_network_and_subnet_test_helper__(network_name, network_cidr)", "def create_network(num_subs):\n\n # Need one host for each subscriber, one for a publisher, and one for a broker\n n_hosts = num_subs + 2\n\n topo = SingleSwitchTopo(n=n_hosts)\n\n return Mininet(topo=topo, controller=OVSController)", "def __init__(\n self,\n web3: Web3,\n contract_manager: ContractManager,\n registry_address: Address,\n sync_start_block: int = 0,\n required_confirmations: int = 8,\n poll_interval: int = 10,\n ):\n super().__init__()\n self.web3 = web3\n self.contract_manager = contract_manager\n self.registry_address = registry_address\n self.sync_start_block = sync_start_block\n self.required_confirmations = required_confirmations\n self.poll_interval = poll_interval\n self.chain_id = int(web3.net.version)\n\n self.is_running = gevent.event.Event()\n self.token_networks: Dict[Address, TokenNetwork] = {}\n self.token_network_listeners: List[BlockchainListener] = []\n\n self.is_running = gevent.event.Event()\n\n log.info('Starting TokenNetworkRegistry Listener (required confirmations: {})...'.format(\n self.required_confirmations,\n ))\n self.token_network_registry_listener = BlockchainListener(\n web3=web3,\n contract_manager=self.contract_manager,\n contract_name=CONTRACT_TOKEN_NETWORK_REGISTRY,\n contract_address=self.registry_address,\n required_confirmations=self.required_confirmations,\n poll_interval=self.poll_interval,\n sync_start_block=self.sync_start_block,\n )\n log.info(\n f'Listening to token network registry @ {registry_address} '\n f'from block {sync_start_block}',\n )\n self._setup_token_networks()", "def rpc_create_connection(client, source, dest,crypto=\"btc\",\n 
rpc_user=BTC_RPC_USER, rpc_password=BTC_RPC_PASSWD, rpc_port=BTC_RPC_PORT):\n try:\n if(crypto==\"btc\"):\n rpc_server = get_ip_by_unknown(client, source)\n dest = get_ip_by_unknown(client, dest)\n else:\n rpc_server = get_ip_by_unknown(client, source,DOCK_NETWORK_NAME_ZCH)\n dest = get_ip_by_unknown(client, dest, DOCK_NETWORK_NAME_ZCH)\n #print rpc_user+\" \"+rpc_password+\" \"+rpc_server+ \" \"+str(rpc_port)\n rpc_connection = AuthServiceProxy(\"http://%s:%s@%s:%s\" % (rpc_user, rpc_password, rpc_server, rpc_port))\n rpc_connection.addnode(dest, \"add\")\n return True\n except JSONRPCException as err:\n print(err)\n return False", "def register_with_existing_node():\n #print('********************')\n print(request.get_json())\n node_address = request.get_json()[\"node_address\"]\n if not node_address:\n return \"Invalid data\", 400\n\n data = {\"node_address\": request.host_url}\n headers = {'Content-Type': \"application/json\"}\n\n # Make a request to register with remote node and obtain information\n response = requests.post(node_address + \"/register_node\",\n data=json.dumps(data), headers=headers)\n\n if response.status_code == 200:\n global blockchain\n global peers\n # update chain and the peers\n chain_dump = response.json()['chain']\n blockchain = create_chain_from_dump(chain_dump)\n peers.update(response.json()['peers'])\n return \"Registration successful\", 200\n else:\n # if something goes wrong, pass it on to the API response\n #print(response.content)\n #print(response.status_code)\n return response.content, response.status_code", "def _ready_litnodes(self):\n # Start lit node 0 and open websocket connection\n self.add_litnode()\n self.litnodes[0].args.extend([self.coins[0][\"wallit_code\"], \"127.0.0.1\"])\n self.litnodes[0].start_node()\n self.litnodes[0].add_rpc_connection(\"127.0.0.1\", \"8001\")\n\n # Start lit node 1 and open websocket connection\n self.add_litnode()\n self.litnodes[1].args.extend([\"-rpcport\", \"8002\", self.coins[0][\"wallit_code\"], \"127.0.0.1\"])\n self.litnodes[1].start_node()\n self.litnodes[1].add_rpc_connection(\"127.0.0.1\", \"8002\")\n\n self.log.info(\"Wait until lit nodes are sync'ed\")\n wait_until(lambda: self.litnodes[0].get_height(self.coins[0]['code']) == 500)\n wait_until(lambda: self.litnodes[1].get_height(self.coins[0]['code']) == 500)\n\n self.log.info(\"Connect lit nodes\")\n res = self.litnodes[0].Listen(Port=\"127.0.0.1:10001\")[\"result\"]\n self.litnodes[0].lit_address = res[\"Adr\"] + '@' + res[\"LisIpPorts\"][0]\n\n res = self.litnodes[1].Connect(LNAddr=self.litnodes[0].lit_address)\n assert not res['error']\n\n # Check that litnode0 and litnode1 are connected\n wait_until(lambda: len(self.litnodes[0].ListConnections()['result']['Connections']) == 1)\n assert_equal(len(self.litnodes[1].ListConnections()['result']['Connections']), 1)\n self.log.info(\"lit nodes connected\")", "def miner_main_send_tx(miner):\n if miner.pubkey in miner.balance:\n if miner.balance[miner.pubkey] > 10:\n other = random.choice(miner.peers)\n miner.create_transaction(other[\"pubkey\"], 10)\n print(f\"Miner {miner.name} sent transaction to {other['name']}\")", "def createAccount_blockchain():\n\n logsOfError=''\n try:\n account = tron.create_account\n except Exception as e:\n logsOfError = logsOfError+str(e)\n return {'publicKey':str(account.public_key), 'base58':str(account.address.base58), 'hex':str( account.address.hex), 'privateKey':str(account.private_key), 'logs':logsOfError, 'status':'success'}", "def register_sub_wallet(self, header, 
body):\n req_dir = \"register/subwallet\"\n method = self.__client.do_post\n\n req_params = self.__set_params(\n header,\n req_dir,\n body=body\n )\n return self.__client.do_request(\n req_params,\n method,\n )", "def create_host(self, conf, tenant_id, network_id, params):\n\t\tpass", "def do_bitcoind_setup(run_as_user, branch, base_path, dist_path, run_mode):\n user_homedir = os.path.expanduser(\"~\" + USERNAME)\n bitcoind_rpc_password = pass_generator()\n bitcoind_rpc_password_testnet = pass_generator()\n \n #Install bitcoind\n runcmd(\"rm -rf /tmp/bitcoind.tar.gz /tmp/bitcoin-0.9.1-linux\")\n runcmd(\"wget -O /tmp/bitcoind.tar.gz https://bitcoin.org/bin/0.9.1/bitcoin-0.9.1-linux.tar.gz\")\n runcmd(\"tar -C /tmp -zxvf /tmp/bitcoind.tar.gz\")\n runcmd(\"cp -af /tmp/bitcoin-0.9.1-linux/bin/64/bitcoind /usr/bin\")\n runcmd(\"cp -af /tmp/bitcoin-0.9.1-linux/bin/64/bitcoin-cli /usr/bin\")\n runcmd(\"rm -rf /tmp/bitcoind.tar.gz /tmp/bitcoin-0.9.1-linux\")\n\n #Do basic inital bitcoin config (for both testnet and mainnet)\n runcmd(\"mkdir -p ~%s/.bitcoin ~%s/.bitcoin-testnet\" % (USERNAME, USERNAME))\n if not os.path.exists(os.path.join(user_homedir, '.bitcoin', 'bitcoin.conf')):\n runcmd(r\"\"\"bash -c 'echo -e \"rpcuser=rpc\\nrpcpassword=%s\\nserver=1\\ndaemon=1\\ntxindex=1\" > ~%s/.bitcoin/bitcoin.conf'\"\"\" % (\n bitcoind_rpc_password, USERNAME))\n else: #grab the existing RPC password\n bitcoind_rpc_password = subprocess.check_output(\n r\"\"\"bash -c \"cat ~%s/.bitcoin/bitcoin.conf | sed -n 's/.*rpcpassword=\\([^ \\n]*\\).*/\\1/p'\" \"\"\" % USERNAME, shell=True).strip().decode('utf-8')\n if not os.path.exists(os.path.join(user_homedir, '.bitcoin-testnet', 'bitcoin.conf')):\n runcmd(r\"\"\"bash -c 'echo -e \"rpcuser=rpc\\nrpcpassword=%s\\nserver=1\\ndaemon=1\\ntxindex=1\\ntestnet=1\" > ~%s/.bitcoin-testnet/bitcoin.conf'\"\"\" % (\n bitcoind_rpc_password_testnet, USERNAME))\n else:\n bitcoind_rpc_password_testnet = subprocess.check_output(\n r\"\"\"bash -c \"cat ~%s/.bitcoin-testnet/bitcoin.conf | sed -n 's/.*rpcpassword=\\([^ \\n]*\\).*/\\1/p'\" \"\"\"\n % USERNAME, shell=True).strip().decode('utf-8')\n \n #Set up bitcoind startup scripts (will be disabled later from autostarting on system startup if necessary)\n runcmd(\"cp -af %s/linux/init/bitcoind.conf.template /etc/init/bitcoind.conf\" % dist_path)\n runcmd(\"sed -ri \\\"s/\\!RUN_AS_USER\\!/%s/g\\\" /etc/init/bitcoind.conf\" % USERNAME)\n runcmd(\"cp -af %s/linux/init/bitcoind-testnet.conf.template /etc/init/bitcoind-testnet.conf\" % dist_path)\n runcmd(\"sed -ri \\\"s/\\!RUN_AS_USER\\!/%s/g\\\" /etc/init/bitcoind-testnet.conf\" % USERNAME)\n \n #install logrotate file\n runcmd(\"cp -af %s/linux/logrotate/bitcoind /etc/logrotate.d/bitcoind\" % dist_path)\n runcmd(\"sed -ri \\\"s/\\!RUN_AS_USER_HOMEDIR\\!/%s/g\\\" /etc/logrotate.d/bitcoind\" % user_homedir.replace('/', '\\/'))\n \n #disable upstart scripts from autostarting on system boot if necessary\n if run_mode == 't': #disable mainnet daemons from autostarting\n runcmd(r\"\"\"bash -c \"echo 'manual' >> /etc/init/bitcoind.override\" \"\"\")\n else:\n runcmd(\"rm -f /etc/init/bitcoind.override\")\n if run_mode == 'm': #disable testnet daemons from autostarting\n runcmd(r\"\"\"bash -c \"echo 'manual' >> /etc/init/bitcoind-testnet.override\" \"\"\")\n else:\n runcmd(\"rm -f /etc/init/bitcoind-testnet.override\")\n \n return bitcoind_rpc_password, bitcoind_rpc_password_testnet" ]
[ "0.6547388", "0.6097607", "0.59806085", "0.5968251", "0.5830869", "0.57322717", "0.56176394", "0.5555726", "0.55104965", "0.534506", "0.53099746", "0.52939296", "0.5237358", "0.5192724", "0.5177056", "0.5170875", "0.5138953", "0.51370627", "0.5098258", "0.50938267", "0.5072076", "0.50614434", "0.50521415", "0.5047024", "0.50398695", "0.50117517", "0.50064033", "0.5002657", "0.4992691", "0.49758962" ]
0.70235425
0
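The positive document in the record above registers the extension module at most once per process: a module-level flag short-circuits repeat calls, and the module is appended to PYCOIN_NETWORK_PATHS only if it is not already listed. Below is a minimal, self-contained sketch of that idempotent pattern; the env-var and module names are illustrative, not taken from the record.

import os

_registered = False

def register_module_path(env_var: str, module: str) -> None:
    """Append `module` to the space-separated `env_var`, at most once per process."""
    global _registered
    if _registered:
        return
    _registered = True
    paths = os.environ.get(env_var, '').split()
    if module not in paths:
        paths.append(module)
    os.environ[env_var] = ' '.join(paths)

# Calling it twice still leaves a single entry.
register_module_path('MY_NETWORK_PATHS', 'my_pkg.networks')
register_module_path('MY_NETWORK_PATHS', 'my_pkg.networks')
print(os.environ['MY_NETWORK_PATHS'])  # my_pkg.networks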
Create words (if None) and start the seed and master node. Then we generate the first addresses, so we can check whether we already have transactions.
def _manually_initialize(self):
    self.mnemonic = Mnemonic(self.language)

    if not self.words:
        # Initialized but still locked
        return

    # Validate words first
    self.validate_words()

    assert isinstance(self.passphrase, bytes), 'Passphrase must be in bytes'

    # Master seed
    seed = self.mnemonic.to_seed(self.words, self.passphrase.decode('utf-8'))

    # Master node
    from pycoin.networks.registry import network_for_netcode
    _register_pycoin_networks()
    network = network_for_netcode('htr')
    key = network.keys.bip32_seed(seed)

    # Keys up to the account level should be hardened.
    # Chain path = 44'/280'/0'/0
    # 44' (hardened) -> BIP44
    # 280' (hardened) -> Coin type (280 = hathor)
    # 0' (hardened) -> Account
    # 0 -> Chain
    self.chain_key = key.subkey_for_path('44H/280H/0H/0')

    for key in self.chain_key.children(self.initial_key_generation, 0, False):
        self._key_generated(key, key.child_index())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(seed, model, tokenizer, temp=0.5):\n\n dictionary = [\"\"] + list(tokenizer.index_word.values())\n start = np.array(tokenizer.texts_to_sequences(seed)).reshape(1, -1)\n if seed[0] == '<start>':\n output = [seed[-1]]\n else:\n output = seed[:]\n\n for _ in range(45):\n weights = reweight_distribution(model.predict(start), temperature=temp)\n word = np.random.choice(dictionary, size=1, p=weights[0, :])[0]\n if word == '<end>': \n if len(output) > 10:\n break\n else:\n continue\n output.append(word)\n start = np.append(start[0, 1:], tokenizer.texts_to_sequences([word])).reshape(1, -1)\n return \" \".join(output)", "def do_generate(self, args):\n if not len(args) == 0:\n self.wallet.generate_address(args)\n else:\n self.wallet.generate_address_randomKey()", "def _generate_worker(text):\n words = text.split(' ')\n response = None\n\n if len(words) > 6:\n index = random.randint(0, len(words)-2)\n response = MarkovMongo().generate((words[index], words[index + 1]))\n logging.info(\"Generated \\\"{0}\\\" from seed \\\"({1}, {2})\\\"\".format(\n response, words[index], words[index + 1]))\n elif len(words) > 2:\n index = random.randint(0, len(words)-1)\n response = MarkovMongo().generate(words[index])\n logging.info(\"Generated \\\"{0}\\\" from seed \\\"{1}\\\"\".format(\n response, words[index]))\n else:\n response = MarkovMongo().generate()\n logging.info(\"Generated \\\"{0}\\\" with no seed\".format(response))\n\n return response", "def setup_targets(self):\n neighbourhood_distance = self.k_max_node_id / 10.\n for i in range(self.min_peers):\n distance = random.randint(0, neighbourhood_distance)\n address = (self.id + distance) % (self.k_max_node_id + 1)\n tolerance = self.k_max_node_id / self.min_peers\n self.targets.append(dict(address=address, tolerance=tolerance, connected=False))", "def _generate_genesis() -> None:\n logging.debug(\"Generating the genesis block\")\n new_recv_block(Block.genesis())", "def make_text(markov_chains):\n\n random_num = generate_random_number(markov_chains.keys())\n\n random_text = []\n\n start_words = generate_start_words(random_num, markov_chains.keys())\n \n random_text.extend(start_words)\n\n\n for i in range(500):\n word_tuple = (random_text[-2],random_text[-1])\n next_word = add_next_word(word_tuple, markov_chains)\n random_text.append(next_word)\n\n return random_text", "def createWord(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Data:\n ...", "def gen_nodes(modelfile, starting_genes):\n # read json file with final model variables\n shape, top_genes, weights, output_key, biases = read_json(modelfile)\n\n # initialize database\n database = db.Database()\n\n # create list to store all layers\n NN = []\n\n # get input probe sequences\n input_seqs_df = inputs.probes_df(top_genes)\n # each layer is a dictionary with keys as names of strands and values as a list of seqs\n l_0 = {}\n probe_seqs = []\n for probe in input_seqs_df[\"Probe Sequences\"]:\n index = 0\n size = database.size\n while database.size < size + 1:\n try:\n database.database_insert(Seq(probe[index]))\n index += 1\n # except block handles case that NONE of the probe sequences were accepted into the database\n # ***TEMPORARY FIX***\n except IndexError:\n index -= 1\n break\n probe_seqs.append(Seq(probe[index]))\n l_0[\"Probe Sequence\"] = probe_seqs\n print(\"Layer 0: \", l_0)\n NN.append(l_0)\n\n # add the tether and promotor to the database\n database.database_insert(starting_genes[\"Tether\"])\n database.database_insert(starting_genes[\"T7 
Promoter\"])\n\n # generate all the sequences for every node in each layer\n for layer in range(1, len(shape)):\n # add the cage and tether sequences to the layer dictionary\n l_i = {}\n l_i[\"Cage Sense\"] = [starting_genes[\"Cage Sense\"]] * shape[layer]\n l_i[\"Cage Antisense\"] = [starting_genes[\"Cage Antisense\"]] * shape[layer]\n l_i[\"Tether\"] = [starting_genes[\"Tether\"]] * shape[layer]\n\n print(\"getting anchor strands\")\n tether_length = len(starting_genes[\"Tether\"])\n size = database.size\n # generate anchor strands until all of them have been accepted into the database\n while database.size < size + shape[layer]:\n anchor = oligo.oligo(tether_length)\n database.database_insert(anchor)\n anchor_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n print(\"getting transcription factors\")\n threshold_energy = 9 # variable that can be changed, pos integer, see gen_tf for description\n static_tf_seqs = []\n tf_seqs = []\n for anchor in anchor_seqs:\n static_tf, tf = gen_tf(anchor, starting_genes[\"Tether\"], threshold_energy)\n static_tf_seqs.append(static_tf)\n tf_seqs.append(tf)\n print(\"DONE\")\n\n print(\"getting outputs\")\n output_length = 25 # length of dna transcript from one node\n size = database.size\n while database.size < size + shape[layer]:\n output = oligo.oligo(output_length).sequence\n database.database_insert(output)\n transcript_seqs = [Seq(x) for x in database.contents['Strand'][size:]]\n print(\"DONE\")\n\n # assemble longer strands in the node\n l_i[\"Static TF + Transcript Sense\"] = [static_tf_seqs[i] + starting_genes[\"T7 Promoter\"] + transcript_seqs[i]\n for i in range(shape[layer])]\n l_i[\"Transcript Antisense + Anchor\"] = [\n oligo.complement(transcript_seqs[i]) + oligo.complement(starting_genes[\"T7 Promoter\"]) + anchor_seqs[i] for\n i in range(shape[layer])]\n\n # intermediates are the strands that determine weights in toehold-mediated displacement\n print(\"getting intermediate\")\n toe_length = 20 # standard length for all toehold sequences\n # get the 2D matrix for this layer and round the values to one decimal place\n weight_matrix = np.array(weights[layer - 1])\n weight_matrix = np.round(weight_matrix, 1)\n intermediate_seqs = []\n tf_appendage_seqs = []\n for i in range(shape[layer - 1]):\n if layer == 1:\n output = NN[0][\"Probe Sequence\"][i]\n else:\n output = NN[layer - 1][\"Static TF + Transcript Sense\"][i][-output_length:]\n inters = []\n top_toe = output[:toe_length]\n b_dom = output[toe_length:]\n tf_appendage_seqs.append(b_dom)\n # get all the possible sequences for toehold weights between 0 and 1\n weight_dict = quant.find_quanta(top_toe)\n for j in range(shape[layer]):\n w = weight_matrix[j, i]\n tf = tf_seqs[j]\n a_star_tf = tf[:len(tf) // 2]\n if w < 0:\n # negative weights\n inters.append(a_star_tf + oligo.complement(b_dom) + weight_dict[w * -1])\n else:\n # positive weights\n inters.append(oligo.complement(a_star_tf) + oligo.complement(b_dom) + weight_dict[w])\n\n intermediate_seqs.append(inters)\n # each list in the nested list is for one node in the layer, get nodes row-wise\n l_i[\"Intermediate\"] = np.array(intermediate_seqs).T.tolist()\n print(\"DONE\")\n\n # TF and TF Inhibitor are products of toehold-mediated displacement for pos and neg weights, respectively\n full_tf_seqs_2D = []\n attack_seqs_2D = []\n for tf in tf_seqs:\n full_tf_seqs = []\n attack_seqs = []\n for appendage in tf_appendage_seqs:\n full_tf_seq = appendage + tf\n attack_seq = appendage + 
oligo.complement(tf[:len(tf) // 2])\n full_tf_seqs.append(full_tf_seq)\n attack_seqs.append(attack_seq)\n full_tf_seqs_2D.append(full_tf_seqs)\n attack_seqs_2D.append(attack_seqs)\n l_i[\"TF\"] = full_tf_seqs_2D\n l_i[\"TF Inhibitor\"] = attack_seqs_2D\n\n print(\"Layer {}: \".format(layer), l_i)\n # add the completed layer to the NN list\n NN.append(l_i)\n\n return NN", "def map_addr_tree_app(s, d, tors):\n if len(tors) < 2:\n print('map_addr_tree: Error: len(tors) < 2')\n eee\n tors1 = tors[:] \n n = len(tors)\n s_out = random.randint(0, n-1) + 1\n s_out = tors[s_out-1]\n tors1.remove(s_out)\n\n d_out = 'e1'\n return s_out, d_out", "def createDwords(self, start: ghidra.program.model.address.Address, count: int) -> None:\n ...", "def generate():", "def seed():", "def address_generator(seed, address_start=0, address_depth=1):\n\n count = address_start\n ag = AddressGenerator(seed, checksum=True)\n\n for address in ag.get_addresses(address_start, address_depth):\n print('Address {}:'.format(count), address)\n count += 1", "async def create_accounts(self):\n self._logger.info(\"Creating accounts...\")\n\n validator_peer_id = ((self.my_id - 1) % self.num_validators) + 1\n host, _ = self.experiment.get_peer_ip_port_by_id(validator_peer_id)\n horizon_uri = \"http://%s:%d\" % (host, 19000 + validator_peer_id)\n\n root_keypair = Keypair.from_secret(\"SDJ5AQWLIAYT22TCYSKOQALI3SNUMPAR63SEL73ASALDP6PYDN54FARM\")\n async with Server(horizon_url=horizon_uri, client=AiohttpClient()) as server:\n root_account = await server.load_account(root_keypair.public_key)\n self.root_seq_num = root_account.sequence\n self._logger.info(\"Setting root sequence number to %d\", self.root_seq_num)\n\n builder = TransactionBuilder(\n source_account=root_account,\n network_passphrase=\"Standalone Pramati Network ; Oct 2018\"\n )\n\n async def append_create_account_op(builder, root_keypair, receiver_pub_key, amount):\n builder.append_create_account_op(receiver_pub_key, amount, root_keypair.public_key)\n if len(builder.operations) == 100:\n self._logger.info(\"Sending create transaction ops...\")\n tx = builder.build()\n tx.sign(root_keypair)\n response = requests.get(\"http://%s:%d/tx?blob=%s\" % (host, 11000 + validator_peer_id,\n quote_plus(tx.to_xdr())))\n self._logger.info(\"Received response for create accounts request: %s\", response.text)\n\n await sleep(2)\n\n self.root_seq_num += 1\n\n partial_root_acc = Account(root_keypair.public_key, self.root_seq_num)\n builder = TransactionBuilder(\n source_account=partial_root_acc,\n network_passphrase=\"Standalone Pramati Network ; Oct 2018\"\n )\n\n return builder\n\n for client_index in range(self.num_validators + 1, self.num_validators + self.num_clients + 1):\n receiver_keypair = Keypair.random()\n builder = await append_create_account_op(builder, root_keypair, receiver_keypair.public_key, \"10000000\")\n self.experiment.send_message(client_index, b\"receive_account_seed\", receiver_keypair.secret.encode())\n\n # Create the sender accounts\n for account_ind in range(self.num_accounts_per_client):\n sender_keypair = Keypair.random()\n builder = await append_create_account_op(builder, root_keypair, sender_keypair.public_key, \"10000000\")\n self.experiment.send_message(client_index, b\"send_account_seed_%d\" % account_ind,\n sender_keypair.secret.encode())\n\n # Send the remaining operations\n if builder.operations:\n self._logger.info(\"Sending remaining create transaction ops...\")\n tx = builder.build()\n tx.sign(root_keypair)\n response = 
requests.get(\"http://%s:%d/tx?blob=%s\" % (host, 11000 + validator_peer_id,\n quote_plus(tx.to_xdr())))\n self._logger.info(\"Received response for create accounts request: %s\", response.text)\n self.root_seq_num += 1", "def __initialSigningKeys(self) -> None:\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")", "def createNodesFamily(familiesList, houseAddressesList):\n creationQuery = [] # Query that will contains all the queries for the node creation\n relationshipsQuery = [] # Query that will contains all the queries for the relationship creation\n for familyEl in familiesList:\n for memberEl in familyEl:\n currentQuery = (\n \"CREATE (p:Person {name: \\\"\" + str(memberEl[int(PersonAttribute.NAME)]) + \"\\\" , surname: \\\"\" +\n str(memberEl[int(PersonAttribute.SURNAME)]) + \"\\\" , age: \\\"\" + str(\n memberEl[int(PersonAttribute.AGE)]) +\n \"\\\" , mail: \\\"\" + str(memberEl[int(PersonAttribute.MAIL)]) + \"\\\" , number: \\\"\" +\n str(memberEl[int(PersonAttribute.NUMBER)]) + \"\\\" , app: \\\"\" +\n str(memberEl[int(PersonAttribute.APP)]) + \"\\\"}); \"\n )\n creationQuery.append(currentQuery)\n # Create the name of the house\n memberFamily = familyEl[0]\n familyName = memberFamily[PersonAttribute.NAME] + \" \" + memberFamily[PersonAttribute.SURNAME] + \" house\"\n addressIndex = randint(0, len(houseAddressesList) - 1)\n address = houseAddressesList[addressIndex]\n civicNumber = randint(0, MAX_CIVIC_NUMBER)\n currentQuery = (\n \"CREATE (h:House {name: \\\"\" + str(familyName) + \"\\\" , address: \\\"\" + str(\n address[HouseAttribute.ADDRESS]) +\n \"\\\", civic_number: \\\"\" + str(civicNumber) + \"\\\" , CAP: \\\"\" + str(address[HouseAttribute.CAP]) +\n \"\\\", city: \\\"\" + str(address[HouseAttribute.CITY]) + \"\\\" , province: \\\"\"\n + str(address[HouseAttribute.PROVINCE]) + \"\\\"}); \"\n )\n creationQuery.append(currentQuery)\n\n # Create the LIVE relationships\n for memberEl in familyEl:\n currentQuery = (\n \"MATCH (p:Person) , (h:House) \"\n \"WHERE p.name = \\\"\" + str(memberEl[int(PersonAttribute.NAME)]) +\n \"\\\" AND p.surname = \\\"\" + str(memberEl[int(PersonAttribute.SURNAME)]) + \"\\\" AND p.age= \\\"\" +\n str(memberEl[int(PersonAttribute.AGE)]) + \"\\\" AND h.name = \\\"\" + str(familyName) +\n \"\\\" AND h.address = \\\"\" + str(address[HouseAttribute.ADDRESS]) + \"\\\" AND h.civic_number = \\\"\" +\n str(civicNumber) + \"\\\" AND h.CAP = \\\"\" + str(address[HouseAttribute.CAP]) +\n \"\\\" AND h.city = \\\"\" + str(address[HouseAttribute.CITY]) + \"\\\" AND h.province = \\\"\" +\n str(address[HouseAttribute.PROVINCE]) + \"\\\" \"\n \"CREATE (p)-[:LIVE]->(h);\"\n )\n relationshipsQuery.append(currentQuery)\n\n return creationQuery, relationshipsQuery", "def map_addr_tree(s, d, tors):\n if len(tors) < 2:\n print('map_addr_tree: Error: len(tors) < 2')\n eee\n tors1 = tors[:] \n n = len(tors)\n #s_out = crc8(0, s, 0x31)%n + 1\n s_out = random.randint(0, n-1) + 1\n s_out = tors[s_out-1]\n tors1.remove(s_out)\n #d_out = crc8(0, d, 0x1d)%(n-1) + 1\n d_out = random.randint(0, n-2) + 1\n d_out = tors1[d_out-1]\n return s_out, d_out", "def test82_GenNewAddress(self):\n payload = {\n 'id': 0,\n 'params': {'amount': 100.0, 'qr_code': False, 'gen_new': False},\n 
'jsonrpc': '2.0',\n 'method': 'create_order'\n }\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result']\n self.assertEqual(res['receiving_address'], 'mjPS9N4T6cjcWLvdkv4jtCrzNA6C6qm8uv')\n self.assertEqual(res['amount'], '0.2860001')\n self.assertTrue(res['exact_amount'])\n order_id = res['order_id']\n payload = {\n 'id': 0, 'params': {'bindings':{'receiving_address': 'mjPS9N4T6cjcWLvdkv4jtCrzNA6C6qm8uv'}},\n 'jsonrpc': '2.0',\n 'method': 'get_address'\n }\n res = requests.post( url, data=json.dumps(payload), headers=headers).json()['result'][0]\n self.assertEqual(res['keypath'], '0/0/4')\n self.assertEqual(res['max_tx'], config.MAX_LEAF_TX)\n self.assertTrue(res['special_digits'] > 0)", "def _generate_new_address(self) -> str:\n while True:\n address = \"0x\" + \"\".join([str(hex(randint(0, 16)))[-1] for _ in range(20)])\n if address not in self.accounts.keys():\n return address", "def __init__(self):\n self.unconfirmed_transactions = [] \n self.chain = []\n self.create_genesis_block()", "def main():\n unigramCost, bigramCost, possibleFills = getRealCosts()\n \n #resulSegment = segmentWords('thisisnotmybeautifulhouse', unigramCost)\n #print(resulSegment)\n #print(f'assimpleasthat = {unigramCost(\"assimpleasthat\")}')\n #print(f'as simple as that = {unigramCost(\"as\")}, {unigramCost(\"simple\")}, {unigramCost(\"as\")}, {unigramCost(\"tha\")}')\n \n\n resultInsert = insertVowels('wld lk t hv mr lttrs'.split(), bigramCost, possibleFills)\n #print(f'om pa = {bigramCost(\"om\", \"pa\")}')\n #print(f'me up = {bigramCost(\"me\", \"up\")}')\n print(f'a cnbdfbzadfbzfbascvdvndsa = {bigramCost(\"a\", \"cnbdfbzadfbzfbascvdvndsa\")}')\n print(f'enough already = {bigramCost(\"enough\", \"already\")}')\n print(f'ngh lrdy = {bigramCost(\"ngh\", \"lrdy\")}')\n\n # print(f'would like = {bigramCost(\"would\", \"like\")}')\n # print(f'like to = {bigramCost(\"like\", \"to\")}')\n # print(f'to have = {bigramCost(\"to\", \"have\")}')\n # print(f'have more = {bigramCost(\"have\", \"more\")}')\n # print(f'more letters = {bigramCost(\"more\", \"letters\")}')\n \n print(resultInsert)", "def _generate_raw_environments(self, num, seed):", "def create_seed(self, position_stem=np.array([0, 0, 0.]), position_root=np.array([0, 0, 2.])):\n cells = []\n cell_stem = Cell(idx=0, position=position_stem, connections = [1])\n cell_root = Cell(idx=1, position=position_root, connections = [0])\n cells.append(cell_stem)\n cells.append(cell_root) \n return cells", "def initial_distribution(self):\n entity_miner = self.entities[0]\n entity_a1 = self.entities[1]\n\n entity_miner.send_bitcoins(entity_a1.address, 50.0)\n entity_miner.send(entity_a1.address, MSC, '50.00000000')\n entity_miner.send(entity_a1.address, TMSC, '50.00000000')\n entity_miner.send(entity_a1.address, MIndiv1, '50')\n # A1 does not receive any MDiv1\n # A1 does not receive any TIndiv1\n entity_miner.send(entity_a1.address, TDiv1, '50.00000000')\n\n self.generate_block()\n self.check_balance(entity_a1.address, MSC, '50.00000000', '0.00000000') # SP 1\n self.check_balance(entity_a1.address, TMSC, '50.00000000', '0.00000000') # SP 2\n self.check_balance(entity_a1.address, MIndiv1, '50', '0') # SP 3\n self.check_balance(entity_a1.address, MDiv1, '0.00000000', '0.00000000') # SP 4\n self.check_balance(entity_a1.address, TIndiv1, '0', '0') # SP 2147483651\n self.check_balance(entity_a1.address, TDiv1, '50.00000000', '0.00000000') # SP 2147483655", "def generate (self, n, ind = 0):\n\n addr = 
\"chirt1qcmdxwpu35mqlzxz3alc9u9ztp22edsuc5s7zzk\"\n self.generatetoaddress (self.nodes[ind], n, addr)", "def createFromMnemonic(words, path, password, chain):\n decoded = mnemonic.decode(words)\n cksum = decoded[-1]\n userSeed = decoded[:-1]\n cs = crypto.sha256ChecksumByte(userSeed.b)\n if cs != cksum:\n raise Exception(\"bad checksum %r != %r\" % (cs, cksum))\n return Wallet.create(path, password, chain, userSeed=userSeed)", "def generate(self, seed_text, next_words=20, T=0.9):\n\n index_to_word = {index: word for word, index in self.tokenizer.word_index.items()}\n\n for _ in range(next_words):\n token_list = self.tokenizer.texts_to_sequences([seed_text])[0]\n token_list = pad_sequences([token_list], maxlen=self.max_sequence_len, padding='pre')\n\n probas = self.model.predict(token_list, verbose=0)\n probas = np.array(probas[0][1:])\n probas = probas ** (1.0 / T)\n probas /= np.sum(probas)\n predicted = np.random.choice(range(1,self.total_words), p=probas)\n \n seed_text += \" \" + (index_to_word[predicted] if predicted != 0 else '')\n\n return seed_text", "def makeTree(node,baseName,baseAddress,nodes,parentNode,vars,isGenerated):\n \n if (isGenerated == None or isGenerated == False) and node.get('generate') is not None and node.get('generate') == 'true':\n generateSize = parseInt(node.get('generate_size'))\n generateAddressStep = parseInt(node.get('generate_address_step'))\n generateIdxVar = node.get('generate_idx_var')\n for i in range(0, generateSize):\n vars[generateIdxVar] = i\n makeTree(node, baseName, baseAddress + generateAddressStep * i, nodes, parentNode, vars, True)\n return\n newNode = Node()\n name = baseName\n if baseName != '': name += '.'\n if node.get('id') is not None:\n name += node.get('id')\n name = substituteVars(name, vars)\n newNode.name = name\n if node.get('description') is not None:\n newNode.description = node.get('description')\n address = baseAddress\n if node.get('address') is not None:\n address = baseAddress + parseInt(node.get('address'))\n newNode.address = address\n newNode.real_address = (address<<2)+0x64000000\n newNode.permission = node.get('permission')\n newNode.mask = parseInt(node.get('mask'))\n newNode.isModule = node.get('fw_is_module') is not None and node.get('fw_is_module') == 'true'\n if node.get('sw_monitor_warn_min_threshold') is not None:\n newNode.warn_min_value = node.get('sw_monitor_warn_min_threshold') \n if node.get('sw_monitor_error_min_threshold') is not None:\n newNode.error_min_value = node.get('sw_monitor_error_min_threshold') \n nodes[name] = newNode\n if parentNode is not None:\n parentNode.addChild(newNode)\n newNode.parent = parentNode\n newNode.level = parentNode.level+1\n for child in node:\n makeTree(child,name,address,nodes,newNode,vars,False)", "def map_addr_tree_3(s, d, tors, block=4):\n block_id = crc8(0, s, 0x31)%block\n\n s_d = crc8(0, s, 0x31)%2\n\n n1 = 128\n \n if block_id == 0:\n n2 = 128*3\n s_out, d_out = random.randint(0, n1-1), random.randint(0, n2-1)\n s_out, d_out = tors[s_out], tors[128 + d_out]\n elif block_id == 3:\n n2 = 128*3\n s_out, d_out = random.randint(0, n1-1), random.randint(0, n2-1)\n s_out, d_out = tors[128*block_id + s_out], tors[d_out]\n elif block_id == 1:\n out_block = random.randint(0, 2)\n if out_block == 0:\n n2 = 128\n s_out, d_out = random.randint(0, n1-1), random.randint(0, n2-1)\n s_out, d_out = tors[128*block_id + s_out], tors[d_out]\n elif out_block == 1:\n n2 = 128\n s_out, d_out = random.randint(0, n1-1), random.randint(0, n2-1)\n s_out, d_out = tors[128*block_id + s_out], 
tors[128*(block_id+1) + d_out]\n else:\n n2 = 128\n s_out, d_out = random.randint(0, n1-1), random.randint(0, n2-1)\n s_out, d_out = tors[128*block_id + s_out], tors[128*(block_id+2) + d_out]\n elif block_id == 2:\n out_block = random.randint(0, 2)\n if out_block == 0:\n n2 = 128\n s_out, d_out = random.randint(0, n1-1), random.randint(0, n2-1)\n s_out, d_out = tors[128*block_id + s_out], tors[d_out]\n elif out_block == 1:\n n2 = 128\n s_out, d_out = random.randint(0, n1-1), random.randint(0, n2-1)\n s_out, d_out = tors[128*block_id + s_out], tors[128*(1+1) + d_out]\n else:\n n2 = 128\n s_out, d_out = random.randint(0, n1-1), random.randint(0, n2-1)\n s_out, d_out = tors[128*block_id + s_out], tors[128*(block_id+1) + d_out]\n \n \n return s_out, d_out", "def create_wallet(self):\n\n child = pexpect.spawn('node createKeystore', cwd='../src/')\n if commonUtil.show_logs:\n child.logfile = sys.stdout\n\n self.file_name = commonUtil.get_random_string()\n self.password = commonUtil.get_random_string()\n\n commonUtil.check_expect(\"Input file name\", child, test_name, \"'Input file name' prompt not found\")\n child.sendline(self.file_name)\n\n commonUtil.check_expect(\"Input password\", child, test_name, \"'Input password' prompt not found\")\n child.sendline(self.password)\n\n result = child.read()\n\n if result.find(\"Please check if the filename is used\") != -1:\n commonUtil.exit_test(\"file name is already present in the system\", child)\n\n if result.find(\"address\") == -1:\n commonUtil.exit_test(\"'address' title not found in wallet summary\", child)\n\n if result.find(\"waddress\") == -1:\n commonUtil.exit_test(\"waddress' title not found in wallet summary\", child)\n\n if result.find(self.file_name) == -1:\n commonUtil.exit_test(\"file name:\" + self.file_name + \" not found in wallet summary\", child)\n\n if result.find(self.password) == -1:\n commonUtil.exit_test(\"password:\" + self.password + \" not found in wallet summary\", child)\n\n address_start = result.find('0x')\n if address_start == -1:\n commonUtil.exit_test('address value starting with 0x not found in wallet summary', child)\n self.address = result[address_start:address_start + 42]\n\n waddress_start = result.find('0x', address_start + 42)\n if waddress_start == -1:\n commonUtil.exit_test('wan address value starting with 0x not found in wallet summary', child)\n self.waddress = result[waddress_start:waddress_start + 134]\n\n child.expect(pexpect.EOF)" ]
[ "0.5785551", "0.5728934", "0.5649208", "0.55091643", "0.5480413", "0.5468495", "0.54453427", "0.5444538", "0.5374258", "0.5360612", "0.53517926", "0.52665097", "0.5249261", "0.5215236", "0.52090055", "0.5205174", "0.51885647", "0.5181177", "0.51633507", "0.51450145", "0.5141656", "0.5120101", "0.51109415", "0.51071745", "0.5104244", "0.51034564", "0.51033574", "0.510187", "0.5098137", "0.5078945" ]
0.6000683
0
Generate a new key in the tree at defined index We add this new key to self.keys and set last_generated_index
def generate_new_key(self, index): new_key = self.chain_key.subkey(index) self._key_generated(new_key, index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _key_generated(self, key, index):\n self.keys[self.get_address(key)] = key\n self.last_generated_index = index", "def __gen_keys__(self):\n if self.seed == b'':\n self.seed = urandom(self.seed_size)\n\n n_prev = Node(hash=hash_factory(data=bytes(self.seed)).digest())\n self.keys.insert(0, n_prev)\n\n for i in range(1, self.l + 1):\n n = Node(hash=hash_factory(data=bytes(n_prev.hash)).digest())\n self.keys.insert(0, n)\n n_prev = n\n\n # Add the decoy nodes as parents of pair nodes.\n # The pair nodes will _always_ be the right child of the decoy nodes.\n for i in range(2, self.l + 1, 2):\n n_pair = self.keys[i] # type: Node\n n_impair_prev = self.keys[i-1] # type: Node\n n_pair.parent = Node(hash=bytes(n_impair_prev.hash))\n n_pair.parent.right_child = n_pair", "def make_new_key(idx, key, d):\n\n new_key = \"%s_%d\" % (key, idx)\n if new_key in d:\n return make_new_key(idx + 1, key, d)\n return new_key", "def gen_keys():", "def made_key(self):\n \n # select a random number from 1 to infinity \n ran_number = random.randint(1,99)\n\n # create a random set based on the first number you chose \n set = xrange(ran_number,28*ran_number,ran_number)\n\n # increase the value of every number in the set \n for item in set:\n item += 3\n Code_Fouad_Teniou.my_key.append(item)\n\n #return a random key \n return Code_Fouad_Teniou.my_key", "def update_key(self):\n self.__prev_key = self.__new_key", "def initiate_new_key (self,key,index):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n\r\n\r\n self.key_dict[key] = {str(index)}\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO all_keys (keyword, notebook)\"\r\n +\" VALUES (?,?);\",\r\n value_tuple)\r\n value_tuple = (notebookname, key, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO keys_to_indexes\"\r\n +\" (notebook, keyword, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def _newKey(self, key):\n pass", "def _newKey(self, key):\n self._testKeySubNsAdd()\n self._getKeyList().append(key)", "def insert_key(self, key : str, value : int) -> None:\n \n hash_key = self.hash_key(key)\n head = self.array[hash_key]\n \n while head.next:\n if head.next.key == key:\n head.next.value = value\n return\n head = head.next\n head.next = Node(key,value)\n self.keys.append(key)", "def _update_append_key(self):\n self.append_key += 1", "def transfer_key_counter_clockwise(self, index):\r\n left, right = self.children[index : index+2]\r\n left.keys.append(self.keys[index])\r\n\r\n if not right.is_leaf():\r\n left.children.append(right.children[0])\r\n del right.children[0]\r\n\r\n self.keys[index] = right.keys[0]\r\n del right.keys[0]", "def new_key(self):\n return max(self.code_table.keys()) + 1", "def insert(self, key):\r\n index = self.search(key)\r\n self.keys.insert(index, key)", "def at_new_key_put(self, value):\n key = self.new_key()\n self.code_table[key] = value\n return key", "def _insert(self, key):\n self.tree.insert(key)", "def generate_reverse_index(self):", "def insert(self, key):\r\n if self.root.num_keys() == self.max_num_keys:\r\n self.root = Node([], [self.root])\r\n self.root.split_child(0)\r\n\r\n node = self.root \r\n while not node.is_leaf():\r\n index = node.search(key)\r\n\r\n child = node.children[index]\r\n if child.num_keys() == self.max_num_keys:\r\n node.split_child(index)\r\n\r\n if node.keys[index] < key:\r\n index += 1\r\n\r\n node = node.children[index] \r\n\r\n node.insert(key)", "def 
add_key(self,key,index):\r\n\r\n #with shelf\r\n\r\n if self.using_shelf:\r\n\r\n if key in self.key_dict:\r\n\r\n self.key_dict[key].add(str(index))\r\n\r\n else:\r\n self.key_dict[key] = {str(index)}\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO all_keys (keyword, notebook)\"\r\n +\" VALUES (?,?);\",\r\n value_tuple)\r\n value_tuple = (notebookname, key, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO keys_to_indexes\"\r\n +\" (notebook, keyword, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def addSequence_aux(self, current, key, i):\r\n # base\r\n if i == len(key):\r\n index = 0\r\n\r\n # Create leaf node\r\n prev = current\r\n if current.link[index] is not None:\r\n current = current.link[index]\r\n # If path doesn't exist\r\n else:\r\n current.link[index] = Node()\r\n current = current.link[index]\r\n current.frequency += 1\r\n current.string = key\r\n # If it is the first node added on database that ends with the prev letter\r\n if prev.highest_freq is None:\r\n prev.highest_freq = current\r\n # If it is not then compare it's frequency\r\n else:\r\n if prev.highest_freq.frequency < current.frequency:\r\n prev.highest_freq = current\r\n return current\r\n elif i < len(key):\r\n # Calculate index\r\n # $ = 0, A = 1, B = 2, C = 3, D = 4\r\n index = ord(key[i]) - 65 + 1\r\n # If path exist\r\n if current.link[index] is not None:\r\n current = current.link[index]\r\n # If path doesn't exist\r\n else:\r\n current.link[index] = Node()\r\n current = current.link[index]\r\n i += 1\r\n\r\n # Increments the frequency of occurrence\r\n leaf = self.addSequence_aux(current, key, i)\r\n # Updating the element that is right before the end of the string\r\n if leaf.highest_freq is None:\r\n # If its the first element added to the database\r\n if current.highest_freq is None:\r\n current.highest_freq = leaf\r\n current.frequency = leaf.frequency\r\n current.index_next = leaf.index\r\n current.index = index\r\n # If its not the first element, compare to the frequency of the existing string of highest frequency\r\n else:\r\n if current.frequency < leaf.frequency:\r\n current.frequency = leaf.frequency\r\n current.highest_freq = leaf\r\n current.index_next = leaf.index\r\n current.index = index\r\n # if they have the same frequency then compare the lexicographical order\r\n elif current.frequency == leaf.frequency:\r\n if current.index_next >= leaf.index:\r\n current.index_next = leaf.index\r\n current.frequency = leaf.frequency\r\n current.highest_freq = leaf\r\n else:\r\n # Updating the rest of the elements\r\n if current.frequency < leaf.frequency:\r\n current.frequency = leaf.frequency\r\n current.highest_freq = leaf.highest_freq\r\n current.index_next = leaf.index\r\n current.index = index\r\n # if they have the same frequency then compare the lexicographical order\r\n elif current.frequency == leaf.frequency:\r\n if current.index_next >= leaf.index:\r\n current.index_next = leaf.index\r\n current.frequency = leaf.frequency\r\n current.highest_freq = leaf.highest_freq\r\n return current", "def table_key(self, reindex_dict):\n reindexed_marks = []\n for m in self.component1.marks:\n new_m = reindex_dict.get(m)\n if new_m == None:\n if len(reindex_dict) == 0:\n new_m = 0\n else:\n new_m = max(reindex_dict.values())+1\n reindex_dict[m] = new_m\n reindexed_marks.append(new_m)\n return tuple( [self.component1.genus] + sorted(reindexed_marks) )", "def add(self, 
key):\n node, parent = Treap._find_node(key, self.root)\n if node:\n node.n += 1\n else:\n heap_id = self.rand.randrange(self.max_heap_id)\n node = Node(key, heap_id)\n if parent:\n node.parent = parent\n parent.child[node.key > parent.key] = node\n else:\n self.root = node\n\n self._prioritize(node)", "def gen_key(self, key):\n b_key = self._hash_digest(key)\n return self._hash_val(b_key, lambda x: x)", "def GenerateKey(self):\n self.key_name = self.key_name or str(uuid.uuid4())\n if self.key is None or not self.key.id():\n self.key = ndb.Key(self._get_kind(), self.key_name)\n return True\n return False", "def get_key(self, proxy_index):\n return self.treeItem(proxy_index)", "def insert_index(self):\n pass", "def __gen_merkle_tree__(self):\n tree_stage = []\n tree_stage_num = int(log2(self.l))\n current_tree_stage = self.keys[1:]\n\n for i in range(0, tree_stage_num):\n tree_stage.insert(i, self.__gen_parent_level_tree__(current_tree_stage))\n current_tree_stage = tree_stage[i]\n\n assert len(current_tree_stage) == 1\n\n self.hash_tree_root = current_tree_stage[0]", "def inc(self, key: str) -> None:\n if key not in self.bucket_of_keys:\n self.bucket_of_keys[key] = self.buckets.insert(self.buckets.begin(), Node(0, {key}))\n bucket, next_bucket = self.bucket_of_keys[key], self.bucket_of_keys[key].next\n if next_bucket is self.buckets.end() or next_bucket.value > bucket.value + 1:\n next_bucket = self.buckets.insert(next_bucket, Node(bucket.value + 1, set()))\n next_bucket.keys.add(key)\n self.bucket_of_keys[key] = next_bucket\n\n bucket.keys.remove(key)\n if not bucket.keys:\n self.buckets.erase(bucket)", "def addSequence(self, key):\r\n self.is_empty = False\r\n current = self.root\r\n i = 0\r\n highest_leaf = self.addSequence_aux(current, key, i)\r\n # Updating the root\r\n # If it is the first element that is added to the database\r\n if highest_leaf.highest_freq is None:\r\n current.highest_freq = highest_leaf\r\n current.frequency = highest_leaf.frequency\r\n current.index_next = highest_leaf.index\r\n else:\r\n # Compare the frequency if it is not the first element on the database\r\n if current.frequency < highest_leaf.frequency:\r\n current.frequency = highest_leaf.frequency\r\n current.highest_freq = highest_leaf.highest_freq\r\n current.index_next = highest_leaf.index\r\n # If the frequency is equal then compare the lexicographical order\r\n elif current.frequency == highest_leaf.frequency:\r\n if current.index_next >= highest_leaf.index:\r\n current.frequency = highest_leaf.frequency\r\n current.highest_freq = highest_leaf.highest_freq\r\n current.index_next = highest_leaf.index", "def transfer_key_clockwise(self, index):\r\n left, right = self.children[index : index+2]\r\n right.keys.insert(0, self.keys[index])\r\n\r\n if left.children:\r\n right.children.insert(0, left.children[-1])\r\n del left.children[-1]\r\n\r\n self.keys[index] = left.keys[-1]\r\n del left.keys[-1]" ]
[ "0.80246866", "0.67809486", "0.6456904", "0.6355784", "0.63524437", "0.6209058", "0.61994654", "0.6194674", "0.6126169", "0.6099803", "0.6098625", "0.6081934", "0.60543513", "0.59544474", "0.58790505", "0.58374196", "0.58367145", "0.5815566", "0.57760036", "0.57750195", "0.57667035", "0.57599795", "0.5751091", "0.57428", "0.5740301", "0.5727466", "0.5690426", "0.5680602", "0.5650117", "0.5647238" ]
0.7571303
1
Add generated key to self.keys and set last_generated_index
def _key_generated(self, key, index): self.keys[self.get_address(key)] = key self.last_generated_index = index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_append_key(self):\n self.append_key += 1", "def gen_keys():", "def generate_new_key(self, index):\n new_key = self.chain_key.subkey(index)\n self._key_generated(new_key, index)", "def made_key(self):\n \n # select a random number from 1 to infinity \n ran_number = random.randint(1,99)\n\n # create a random set based on the first number you chose \n set = xrange(ran_number,28*ran_number,ran_number)\n\n # increase the value of every number in the set \n for item in set:\n item += 3\n Code_Fouad_Teniou.my_key.append(item)\n\n #return a random key \n return Code_Fouad_Teniou.my_key", "def _newKey(self, key):\n self._testKeySubNsAdd()\n self._getKeyList().append(key)", "def update_key(self):\n self.__prev_key = self.__new_key", "def _newKey(self, key):\n pass", "def __gen_keys__(self):\n if self.seed == b'':\n self.seed = urandom(self.seed_size)\n\n n_prev = Node(hash=hash_factory(data=bytes(self.seed)).digest())\n self.keys.insert(0, n_prev)\n\n for i in range(1, self.l + 1):\n n = Node(hash=hash_factory(data=bytes(n_prev.hash)).digest())\n self.keys.insert(0, n)\n n_prev = n\n\n # Add the decoy nodes as parents of pair nodes.\n # The pair nodes will _always_ be the right child of the decoy nodes.\n for i in range(2, self.l + 1, 2):\n n_pair = self.keys[i] # type: Node\n n_impair_prev = self.keys[i-1] # type: Node\n n_pair.parent = Node(hash=bytes(n_impair_prev.hash))\n n_pair.parent.right_child = n_pair", "def insert(self, key):\r\n index = self.search(key)\r\n self.keys.insert(index, key)", "def new_key(self):\n return max(self.code_table.keys()) + 1", "def __setitem__(self, key, value):\n if key not in self.ordered_keys:\n self.ordered_keys.append(key)\n super().__setitem__(key, value)", "def add(self, key):\n self.times[key] = time.time()", "def GenerateKey(self):\n self.key_name = self.key_name or str(uuid.uuid4())\n if self.key is None or not self.key.id():\n self.key = ndb.Key(self._get_kind(), self.key_name)\n return True\n return False", "def self_insert():\r\n insert_char(last_key())", "def _pre_put_hook(self): # pylint: disable=g-bad-name\n super(BaseModel, self)._pre_put_hook() # pylint: disable=protected-access\n self.GenerateKey()", "def _unique_key(self):\n key = f'factor_{self.counter}'\n self.counter += 1\n return key", "def insert_key(self, key : str, value : int) -> None:\n \n hash_key = self.hash_key(key)\n head = self.array[hash_key]\n \n while head.next:\n if head.next.key == key:\n head.next.value = value\n return\n head = head.next\n head.next = Node(key,value)\n self.keys.append(key)", "def save(self, key=None):\n\n # we can override our key by passing one in explicitly\n if key: self._key = key\n\n # now save in the db\n if self._key:\n self._dbag[self._key] = self.to_python()\n else:\n self._key = self._dbag.add(self.to_python())\n return self._key", "def _unique_key(self):\n key = f'param_{self.counter}'\n self.counter += 1\n return key", "def make_consistent(self):\r\n\r\n for key in self.get_keys():\r\n self.eliminate_key(key)\r\n\r\n for i_temp in self.indexes(): #i will be a note index\r\n for j_temp in self.get_keys_from_note(i_temp):\r\n if self.key_dict_contains(j_temp):\r\n self.add_key(j_temp,Index(i_temp))\r\n## self.key_dict[j_temp].add(str(Index(i_temp)))\r\n else:\r\n self.initiate_new_key(j_temp,Index(i_temp))", "def _make_key(self, record_dict: Dict[str, Any]) -> int:\n return self._keys.setdefault(frozenset(record_dict.keys()), len(self._keys))", "def inc(self, key: str) -> None:\n if key not in self.bucket_of_keys:\n 
self.bucket_of_keys[key] = self.buckets.insert(self.buckets.begin(), Node(0, {key}))\n bucket, next_bucket = self.bucket_of_keys[key], self.bucket_of_keys[key].next\n if next_bucket is self.buckets.end() or next_bucket.value > bucket.value + 1:\n next_bucket = self.buckets.insert(next_bucket, Node(bucket.value + 1, set()))\n next_bucket.keys.add(key)\n self.bucket_of_keys[key] = next_bucket\n\n bucket.keys.remove(key)\n if not bucket.keys:\n self.buckets.erase(bucket)", "def at_new_key_put(self, value):\n key = self.new_key()\n self.code_table[key] = value\n return key", "def regenerate_API_key(self) -> None:\n session = create_session()\n new_key = generate_random_string(24)\n # Check if there is any user with exact same API key as just generated\n if new_key not in session.query(User.API_KEY).all():\n self.API_KEY = new_key\n session.merge(self)\n session.commit()\n else:\n while new_key in session.query(User.API_KEY).all():\n new_key = generate_random_string(24)\n self.API_KEY = new_key\n session.merge(self)\n session.commit()", "def make_new_key(idx, key, d):\n\n new_key = \"%s_%d\" % (key, idx)\n if new_key in d:\n return make_new_key(idx + 1, key, d)\n return new_key", "def touchKBucket(self, key):", "def generate_key(self, rand = random.SystemRandom()):\n k = rand.randrange(0, self.n - 1)\n return k, self.base_mul(k)", "def initiate_new_key (self,key,index):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n\r\n\r\n self.key_dict[key] = {str(index)}\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO all_keys (keyword, notebook)\"\r\n +\" VALUES (?,?);\",\r\n value_tuple)\r\n value_tuple = (notebookname, key, str(index))\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO keys_to_indexes\"\r\n +\" (notebook, keyword, note_index)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def add(self, key):\r\n if key not in self.map:\r\n end = self.end\r\n curr = end[PREV]\r\n curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]\r\n if self.emitter:\r\n self.emitter.emit()", "def key(self, key):\n self._key = key" ]
[ "0.70120007", "0.67435443", "0.66058487", "0.65902716", "0.65373045", "0.6447427", "0.63391894", "0.6228748", "0.6154519", "0.60397923", "0.60168254", "0.59762555", "0.59666264", "0.595028", "0.59233314", "0.5900949", "0.5872757", "0.5861927", "0.5853988", "0.5841961", "0.5828071", "0.58257514", "0.581002", "0.5772928", "0.57691634", "0.5756211", "0.57282674", "0.5723285", "0.57212275", "0.5717762" ]
0.84722704
0
Return the key generated by the index in the parameter
def get_key_at_index(self, index): return self.chain_key.subkey(index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_id(self, index):\n return self.__keys[index]", "def eval_key(self, index):\n return self._EVAL_PREFIX + str(index)", "def _unique_key(self):\n key = f'param_{self.counter}'\n self.counter += 1\n return key", "def _index_key(self, sig, codegen):\n return (sig, codegen.magic_tuple())", "def key(self):\n return self.sentence_idx * (10 ** 6) + self.get_id()", "def get_index(self, key):\r\n\t\tindex = self._hash_function(key) % self.capacity\r\n\t\treturn index", "def _get_index(self, key):\n return self._hash_function(key) % self.capacity", "def key(self):\n return self.key_for(self.id)", "def get_key_id(self):", "def key():", "def key(key):\n return key", "def get_key(self, item):\r\n return item[0]", "def key(self):\n return key_for_name(self.name)", "def get_key(self) -> int:\n return self.key", "def get_es_key(idx, ps_key):\n for es_k, ps_k in FIELDS_MAP[idx].iteritems():\n if ps_key == ps_k:\n return es_k", "def _get_memoization_key(self, *args, **kwargs):\n #result = [id(fn)]\n #for arg in args:\n # result.append(id(arg))\n #result.append(id(mark))\n #for key, value in kwargs:\n # result.append(key)\n # result.append(id(value))\n #return tuple(result)\n key = str(self._inject_obj(args)) + str(kwargs)\n return key", "def hash_function(self, key):\n index = key % len(self.objects_list)\n return index", "def index_id(i):\n return f\"(i={i})\"", "def table_key(self, reindex_dict):\n reindexed_marks = []\n for m in self.component1.marks:\n new_m = reindex_dict.get(m)\n if new_m == None:\n if len(reindex_dict) == 0:\n new_m = 0\n else:\n new_m = max(reindex_dict.values())+1\n reindex_dict[m] = new_m\n reindexed_marks.append(new_m)\n return tuple( [self.component1.genus] + sorted(reindexed_marks) )", "def _index_key(self, sig, codegen):\n codebytes = self._py_func.__code__.co_code\n if self._py_func.__closure__ is not None:\n cvars = tuple([x.cell_contents for x in self._py_func.__closure__])\n # Note: cloudpickle serializes a function differently depending\n # on how the process is launched; e.g. multiprocessing.Process\n cvarbytes = dumps(cvars)\n else:\n cvarbytes = b''\n\n hasher = lambda x: hashlib.sha256(x).hexdigest()\n return (sig, codegen.magic_tuple(), (hasher(codebytes),\n hasher(cvarbytes),))", "def GetVoucherManagerKeyForIndex(idx):\n return unsigned(kern.globals.iv_global_table[idx].ivgte_key)", "def get_key(self) -> int:\n return self.__key", "def GetSubkeyByIndex(self, index):", "def get_key(self, proxy_index):\n return self.treeItem(proxy_index)", "def key(self):\n return self._key if self._key else self.factory().key", "def key(self, name):\n return name", "def hash_index(self, key):\n #return self.fnv1(key) % self.capacity\n return self.djb2(key) % self.capacity", "def _idx(self, class_, key):\n return u':'.join((class_, key))", "def _index_lookup(self, key: int) -> str:\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token", "def getKey(self, index) -> AnimCurveKey:\n ..." ]
[ "0.75036955", "0.72083074", "0.71914303", "0.70859087", "0.69023156", "0.68941003", "0.6885927", "0.68858844", "0.6876659", "0.676597", "0.6760929", "0.6748612", "0.6699479", "0.66887134", "0.66435224", "0.6635806", "0.6619856", "0.660352", "0.66027474", "0.66022474", "0.65973425", "0.65790945", "0.65656745", "0.6546618", "0.6537803", "0.65157133", "0.6509303", "0.650166", "0.6500038", "0.6494217" ]
0.7258488
1
Method called when the wallet receive new tokens If the gap limit is not yet achieved we generate more keys
def tokens_received(self, address58: str) -> None: received_key = self.keys[address58] # If the gap now is less than the limit, we generate the new keys until the limit # Because we might be in sync phase, so we need those keys pre generated diff = self.last_generated_index - received_key.child_index() if (self.gap_limit - diff) > 0: for _ in range(self.gap_limit - diff): self.generate_new_key(self.last_generated_index + 1) # Last shared index should be at least the index after the received one self.last_shared_index = max(self.last_shared_index, received_key.child_index() + 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keychange(self):\n # if response.json()['error']['errors'][0]['reason']=='quotaExceeded':\n self.keyindex += 1\n if self.keyindex == len(self.keylist):\n self.keyindex = 0\n print('Keylist length reached')\n print('Changinf Key..')\n key = self.keylist[self.keyindex]\n print(\"Quota Exceeded\", self.keyindex)\n return key", "def __initialSigningKeys(self) -> None:\n seedStr = '0' * 31\n seedNum = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n seedList = []\n for i in range(15):\n seed = seedStr + seedNum[i]\n seedList.append(seed.encode('utf-8'))\n\n for seed in seedList:\n self.signingKeysList.append(SigningKey(seed))\n log.info(\"15 signing keys have been generated successfully\")", "def DeriveNextKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def newKeyGenerate():\n generate()\n return '', 204", "def _key_generated(self, key, index):\n self.keys[self.get_address(key)] = key\n self.last_generated_index = index", "def getNextApiKey(self):\n\n self.resetSession(get_new_api_key=False)\n\n if self.key_idx == len(self.api_keys):\n self.key_idx = 0\n\n self.session.auth = (self.api_keys[self.key_idx][0], '')\n self.number_of_max_req = self.api_keys[self.key_idx][1]\n\n self.key_idx += 1", "def gen_keys():", "def genKeys():\r\n (pub, priv) = rsa.newkeys(256)\r\n context = {\r\n 'pub': pub,\r\n 'priv': priv\r\n }\r\n return context", "def generate_new_token(cls):\n token = proquint.generate()\n\n # Try 100 times to generate a unique token.\n TRIALS = 100\n for __ in range(TRIALS):\n token = proquint.generate()\n if SecretToken.exists(token):\n continue\n break\n # after TRIALS attempts and we didn't get a unique token,\n # just raise an error.\n # See https://stackoverflow.com/a/9980160 on what for-else loop does.\n else:\n raise ValueError(\"Cannot generate new token\")\n\n # We found a unique token! 
Save it\n return token", "def generate_key():\n return get_token_generator().generate_token()", "def EnableFreeAPIKeyRateLimit(self):\n self._hashes_per_batch = 4\n self._wait_after_analysis = 60.0", "def regenerate_API_key(self) -> None:\n session = create_session()\n new_key = generate_random_string(24)\n # Check if there is any user with exact same API key as just generated\n if new_key not in session.query(User.API_KEY).all():\n self.API_KEY = new_key\n session.merge(self)\n session.commit()\n else:\n while new_key in session.query(User.API_KEY).all():\n new_key = generate_random_string(24)\n self.API_KEY = new_key\n session.merge(self)\n session.commit()", "def get_next_allowed_request(self):\n\t\t#**********************************************************************\n\t\t# Only 4 requests a minute are allowed on VT for public API keys.\n\t\t#**********************************************************************\n\t\tdelay = VirusTotalSource.DelayBetweenRequest # Every X seconds\n\t\t#**********************************************************************\n\t\t# Provides the time of the next authorized request\n\t\t# 2 seconds are added to add some leeway.\n\t\t#**********************************************************************\n\t\treturn datetime.now() + timedelta(seconds=delay+2)", "def genKey(self, otherKey):\n self.sharedSecret = self.genSecret(self.privateKey, otherKey)\n #print(\"Shared secret:\")\n #print(self.sharedSecret)\n s = hashlib.sha256()\n s.update(bytes(str(self.sharedSecret).encode()))\n self.key = s.digest()", "def _fill_send_buffer_1d(self, key_base):\n first_time_step = FecDataView.get_first_machine_time_step()\n end_time_step = FecDataView.get_current_run_timesteps()\n if first_time_step == end_time_step:\n return\n keys = get_field_based_keys(key_base, self._vertex_slice)\n key_list = [keys[atom] for atom in range(self._vertex_slice.n_atoms)]\n for tick in sorted(self._send_buffer_times):\n if self._is_in_range(tick, first_time_step, end_time_step):\n self._send_buffer.add_keys(tick, key_list)", "def _fill_send_buffer(\n self, machine_time_step, first_machine_time_step,\n n_machine_time_steps):\n\n key_to_send = self._virtual_key\n if self._virtual_key is None:\n key_to_send = 0\n\n if self._send_buffer is not None:\n self._send_buffer.clear()\n if (self._send_buffer_times is not None and\n len(self._send_buffer_times) != 0):\n if hasattr(self._send_buffer_times[0], \"__len__\"):\n\n # Works with a list-of-lists\n for key in range(self._n_keys):\n for timeStamp in sorted(self._send_buffer_times[key]):\n time_stamp_in_ticks = int(math.ceil(\n float(int(timeStamp * 1000.0)) /\n machine_time_step))\n if self._is_in_range(\n time_stamp_in_ticks, first_machine_time_step,\n n_machine_time_steps):\n self._send_buffer.add_key(\n time_stamp_in_ticks, key_to_send + key)\n else:\n\n # Work with a single list\n key_list = [\n key + key_to_send for key in range(self._n_keys)]\n for timeStamp in sorted(self._send_buffer_times):\n time_stamp_in_ticks = int(math.ceil(\n float(int(timeStamp * 1000.0)) /\n machine_time_step))\n\n # add to send_buffer collection\n if self._is_in_range(\n time_stamp_in_ticks, first_machine_time_step,\n n_machine_time_steps):\n self._send_buffer.add_keys(\n time_stamp_in_ticks, key_list)", "def generate_random_key(self):\n self.key = ''.join(choice(ascii_letters + digits) for i in range(300))", "def generate_new_key(self, index):\n new_key = self.chain_key.subkey(index)\n self._key_generated(new_key, index)", "def generate_new_token(self):\n 
self.access_token = random_auth_key()", "def _apply_rate_limit(self):\n update_time = time()\n user_name = self.bot.user.full_name\n if user_name in self.tokens.keys():\n last_change = self.tokens[user_name][0]\n # Add 1 token for every 30 seconds from the last change\n added_tokens = int((update_time - last_change) / 30)\n self.tokens[user_name][1] += added_tokens\n # Max at 5 self.tokens\n if self.tokens[user_name][1] > 5:\n self.tokens[user_name][1] = 5\n else:\n # Initialize the users token pair (last change, # of self.tokens)\n self.tokens[user_name] = [update_time, 5] # Start with 5 self.tokens\n if self.tokens[user_name][1] <= 0:\n return False\n self.tokens[user_name][1] -= 1\n return True", "def free_slot(self, current_time):\n self.free_slots += 1\n new_events = self.maybe_start_task(current_time)\n assert (self.free_slots > 0)\n if self.free_slots > 0:\n token = TokenArrival(self.simulation, self.id)\n new_events.append((current_time+NETWORK_DELAY, token))\n #new_events.append((current_time, token))\n return new_events", "def ask_keys(self, update, context):\r\n update.message.reply_text('Введите новый ключ')\r\n return self.LISTEN", "def generate_keys(self):\n self.keys = []\n key = string_to_bit_array(self.passwd)\n key = self.permutation(key, CP_1) # Perform initial permutation on the key\n g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)\n for i in range(16): # Apply the 16 rounds\n g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round\n tmp = g + d # Merge them\n self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki", "def EnableFreeAPIKeyRateLimit(self):\n self._analyzer.hashes_per_batch = 4\n self._analyzer.wait_after_analysis = 60\n self._analysis_queue_timeout = self._analyzer.wait_after_analysis + 1", "def generate_token(self, length=6, valid_secs=300, commit=True):\n self.token = random_number_token(length)\n self.valid_until = timezone.now() + timedelta(seconds=valid_secs)\n if commit:\n self.save()", "def regenerate(self):\n self.secret_code = random.randint(self.min, self.max)", "def generate_tokens(callback_key):\n random_hash = generate_random_security_hash()\n\n return generate_security_hash(random_hash, callback_key), random_hash", "def add_token(self, amount):\n self.M += amount", "def create_key ():", "def generate_keystream(self):" ]
[ "0.5717665", "0.5667222", "0.55144507", "0.5494286", "0.5439673", "0.54292053", "0.5385075", "0.5318828", "0.53049546", "0.523834", "0.52360225", "0.520741", "0.51931775", "0.51233554", "0.51119256", "0.51012534", "0.5098826", "0.5066854", "0.5043255", "0.5040246", "0.5038378", "0.5024621", "0.49960625", "0.49893963", "0.49813336", "0.49542043", "0.49461818", "0.49444693", "0.49255416", "0.49004206" ]
0.7514984
0
Return if wallet is currently locked The wallet is locked if self.words is None
def is_locked(self) -> bool: return self.words is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def walletlock(self):\n return self.proxy.walletlock()", "def locked(self) -> bool:\n return pulumi.get(self, \"locked\")", "async def locked(self):\n return not \"not\" in await self.ask(\"locked\")", "def is_locked(self):\r\n pass", "def locked(self):\n if self._locked == None:\n return False\n return self._locked", "def locked(self):\n return self.is_locked", "def is_locked(self) -> bool | None:\n return self.instrument.is_locked", "def locked(self) -> bool:\n return self.__locked", "def is_locked(self):\n return self._is_locked", "def locked(self) -> bool:\n return self._locked", "def locked(self):\n\t\treturn self.__locked", "def locked(self):\n with self._block:\n status = repr(self).split(maxsplit=1)[0][1:]\n assert status in ('locked', 'unlocked')\n return status == 'locked'", "def locked(self):\n return self._locked", "def locked(self):\n return self._locked", "def locked(self):\n return self._locked", "def is_locked(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"is_locked\"))\r\n return self._is_locked", "def is_locked(self):\n return cache.get(self.id)", "def is_encrypted(self, wallet_manager):\n self._load_wallet(wallet_manager)\n hotstorage_path = wallet_manager.rpc_path + \"_hotstorage\"\n wallet_path = os.path.join(hotstorage_path, self.alias)\n try:\n # check if password is enabled\n info = wallet_manager.rpc.getwalletinfo(wallet=wallet_path)\n return \"unlocked_until\" in info\n except Exception as e:\n logger.warning(\"Cannot fetch hot wallet info\")\n # Assuming encrypted by default\n return True", "def is_locked(self):\n ret_val = self._is_locked()\n return ret_val", "def is_locked(self):\n if not hasattr(self, \"_memo_init\"):\n return False\n else:\n return self._locked", "def is_locked(self):\n return self._unit_got == False", "def locked(self):\n return self.__lock.locked()", "def locked(self) -> bool:\n return self._lock.locked()", "def Locked(self) -> bool:", "def locked(self):\n return self._owner is not None", "def is_locked(self):\n return self._state == STATE_LOCKED", "def locked(self):\n return self.counter <= 0", "def membership_lock(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"membership_lock\")", "def membership_lock(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"membership_lock\")", "def membership_lock(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"membership_lock\")" ]
[ "0.7656873", "0.70531535", "0.70303595", "0.69371295", "0.6897361", "0.6795601", "0.675714", "0.6750617", "0.67489517", "0.67401165", "0.67396694", "0.66538787", "0.66513664", "0.66513664", "0.66513664", "0.6588879", "0.6574526", "0.65563613", "0.6542578", "0.651717", "0.65046436", "0.6466913", "0.64649385", "0.64631826", "0.6413008", "0.63924915", "0.63389176", "0.63243556", "0.6290835", "0.6290835" ]
0.80184186
0
Load all saved txs to fill the wallet txs
def load_txs(self, tx_storage): for tx in tx_storage._topological_sort_dfs(): self.on_new_tx(tx)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_txs(self, txs):\n # For now avoid caching orphan transactions. We might want to show them somehow in the future.\n cli_txs = {tx[\"txid\"]: tx for tx in txs if tx[\"category\"] != \"orphan\"}\n raw_txs = self.cache_raw_txs(cli_txs)\n cached_txs = self.cache_txs(raw_txs)\n\n return cached_txs", "def load_accounts():\n logger.info('Loading accounts...')\n with open(\"C:\\\\Users\\\\harun\\\\Desktop\\\\BigSwede-volume-bot\\\\keys.txt\") as f:\n for key in f:\n key = key.strip()\n address = web3.eth.account.from_key(key).address\n accounts.append((address, key))", "def load_transactions(self, address, update=True, verbose=False, **kwargs):\n if self.apikey is None:\n update = False\n if verbose:\n print('load_transactions', address)\n fn = os.path.join(self.cache_dir, address + '.json')\n startblock = None\n transactions = []\n if os.path.exists(fn):\n with open(fn) as f:\n try:\n transactions = json.load(f)\n except json.decoder.JSONDecodeError:\n if verbose:\n print('ignoring error while loading', fn)\n pass\n if not update:\n return transactions\n if len(transactions):\n startblock = max([int(e['blockNumber']) for e in transactions])\n if verbose:\n print('starting from cache at', startblock, 'with', len(transactions))\n # add new transactions\n new_transactions = self.fetch_transactions(address, startblock=startblock, verbose=verbose, **kwargs)\n # dedupe\n if len(new_transactions) > 0:\n transactions.extend(new_transactions)\n transactions = list({e['hash']:e for e in transactions}.values())\n safe_dump(fn, transactions)\n return transactions", "def _load_templates(cls):\n if cls._raw_templates is None:\n cls._raw_templates = fetch_rrlyrae_templates()", "def setup_cache(self):\n if self.walletname not in cache: \n cache[self.walletname] = {\n \"raw_transactions\": {},\n \"transactions\": [],\n \"tx_count\": None,\n \"tx_changed\": True,\n \"last_block\": None,\n \"raw_tx_block_update\": {},\n \"addresses\": [],\n \"change_addresses\": [],\n \"scan_addresses\": True\n }", "def checkAllTx(self):\n return None", "def populate(self):\n\n NUM_COUNTRIES = 2 # random.randint(1, 4)\n\n # find a suitable hex\n with Timer(\"Creating initial data\", debug=self.debug):\n\n for i in range(NUM_COUNTRIES):\n country, provinces, pops = create_country(self, self.map)\n country.determine_tax_policy()\n self.countries.append(country)", "def xontribs_load(names, verbose=False):\n ctx = builtins.__xonsh__.ctx\n res = ExitCode.OK\n for name in names:\n if verbose:\n print(\"loading xontrib {0!r}\".format(name))\n try:\n update_context(name, ctx=ctx)\n except Exception:\n res = ExitCode.INIT_FAILED\n print_exception(\"Failed to load xontrib {}.\".format(name))\n if hasattr(update_context, \"bad_imports\"):\n res = ExitCode.NOT_FOUND\n prompt_xontrib_install(update_context.bad_imports)\n del update_context.bad_imports\n return res", "def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions", "def load_data(self):\n try:\n with open('blockchain-{}.txt'.format(self.node_id), mode='r') as f:\n file_content = f.readlines()\n blockchain = json.loads(file_content[0][:-1])\n updated_blockchain = []\n for block in blockchain:\n converted_tx = [Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']]\n converted_chip = [Chipsaction(\n tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount']) for tx in block['chipsactions']]\n converted_message = 
[Messsaction(\n tx['sender'], tx['follower'], tx['message'], tx['signature']) for tx in block['messsactions']]\n updated_block = Block(\n block['index'], block['previous_hash'], converted_tx, converted_chip, converted_message, block['proof'], block['timestamp'])\n updated_blockchain.append(updated_block)\n self.chain = updated_blockchain\n\n open_transactions = json.loads(file_content[1][:-1])\n # need to convert the loaded data because Transactions should use OrderedDict\n updated_transactions = []\n for tx in open_transactions:\n updated_transaction = Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount'])\n updated_transactions.append(updated_transaction)\n self.__open_transactions = updated_transactions\n\n open_chipsactions = json.loads(file_content[2][:-1])\n # need to convert the loaded data because Chipsactions should use OrderedDict\n updated_chipsactions = []\n for tx in open_chipsactions:\n updated_chipsaction = Chipsaction(\n tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount'])\n updated_chipsactions.append(updated_chipsaction)\n self.__open_chipsactions = updated_chipsactions\n\n open_messsactions = json.loads(file_content[3][:-1])\n # need to convert the loaded data because Messsactions should use OrderedDict\n updated_messsactions = []\n for tx in open_messsactions:\n updated_messsaction = Messsaction(\n tx['sender'], tx['follower'], tx['message'], tx['signature'])\n updated_messsactions.append(updated_messsaction)\n self.__open_messsactions = updated_messsactions\n\n peer_nodes = json.loads(file_content[4])\n self.__peer_nodes = set(peer_nodes)\n except (IOError, IndexError):\n pass\n finally:\n print('Cleanup!')", "def load_fonts(self):\n for key, font in enumerate(self.fonts):\n self.fonts[key]['font'] = load_font(font['name'], font['size'])\n checkpoint('fonts')", "def loadTrie(self):\n for file in self._gram_files:\n trie_file = getTrieFile(os.path.basename(file), self._pickle_dir)\n with open(trie_file, 'rb') as fd:\n self._tries.append(pickle.load(fd))", "def _load_transactions(self):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\ttry:\r\n\t\t\twith open(self._state_file, 'rb') as tmp:\r\n\t\t\t\tlogger.debug(\"There is a file.\")\r\n\t\t\t\ttmp_dict = pickle.load(tmp)\r\n\t\t\t\tlogger.debug(\"Dictionary loaded from file: %s\" % tmp_dict)\r\n\t\texcept IOError as e: # File doesn't exists\r\n\t\t\tlogger.debug(\"Exit - No file. 
Error message: %s\" % e)\r\n\t\t\ttmp_dict = {}\r\n\t\t\t\r\n\t\treturn tmp_dict", "def sync():\n\n DFS.update(get_data_without_transactions())\n DFS[c.dfs.TRANS] = get_df_transactions()\n\n YML.update(get_config())", "def test_wallets_get_transaction_list(self):\n pass", "def un_load_all(cls):\n gxapi_cy.WrapEMAPTEMPLATE._un_load_all(GXContext._get_tls_geo())", "def load_data(self):\n try:\n with open(\"blockchain.txt\", mode=\"r\") as f:\n file_content = f.readlines()\n blockchain = json.loads(file_content[0][:-1])\n # OrderedDict\n updated_blockchain = []\n for block in blockchain:\n converted_transfers = [\n Transfer(tx[\"user\"], tx[\"signature\"], tx[\"amount\"])\n for tx in block[\"transfers\"]\n ]\n # converted_transfers = [OrderedDict(\n # [('user', tx['user']), ('amount', tx['amount'])]) for tx in block['transfers']]\n updated_block = Block(\n block[\"index\"],\n block[\"previous_hash\"],\n converted_transfers,\n block[\"proof\"],\n block[\"timestamp\"],\n )\n updated_blockchain.append(updated_block)\n self.__chain = updated_blockchain\n open_transfers = json.loads(file_content[1][:-1])\n # OrderedDict\n updated_transfers = []\n for tx in open_transfers:\n updated_transfer = Transfer(\n tx[\"user\"], tx[\"signature\"], tx[\"amount\"]\n )\n # updated_transfer = OrderedDict(\n # [('user', tx['user']), ('amount', tx['amount'])])\n updated_transfers.append(updated_transfer)\n self.__open_transfers = updated_transfers\n peer_nodes = json.loads(file_content[2])\n self.__peer_nodes = set(peer_nodes)\n\n except (IOError, IndexError):\n pass", "def load_all(): \n training_data = dict() \n for i in range(7):\n training_data[i+1] = load_data(i+1) \n\n return training_data", "def preload_local_schemas(self):\n schemas = [self.SCHEMA_PACKAGE_DESCRIPTOR,\n self.SCHEMA_SERVICE_DESCRIPTOR,\n self.SCHEMA_FUNCTION_DESCRIPTOR]\n\n for schema in schemas:\n schema_file = self._schemas[schema]['local']\n if not os.path.isfile(schema_file):\n continue\n try:\n self._schemas_library[schema] = load_local_schema(schema_file)\n except FileNotFoundError:\n continue", "def seed_all():\n seed_client()\n seed_staff()\n seed_request()\n seed_comment()", "def _save_transactions(self):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\twith open(self._state_file, 'wb') as tmp:\r\n\t\t\tlogger.debug(\"Dumping transactions: %r\" % self.transactions)\r\n\t\t\tpickle.dump(self.transactions, tmp)\r\n\t\t\r\n\t\tlogger.debug(\"Exit\")", "async def reload_all(ctx):\n await ext_manager.reload_all()\n await ctx.send(\"Successfully reloaded.\")", "def load_tas_lookup():\n logger.info('Loading TAS')\n load_tas()", "def load_all_traj():\n pdb='/bpti/bpti-prot/bpti-prot.pdb'\n dcd = lambda x: '/bpti/bpti-prot/bpti-prot-%02d.dcd' % x\n tr = []\n for i in range(11):\n print ('loading ', i)\n start = dt.datetime.now()\n tr.append(md.load(DCD_ALL(i), top=PDB_ALL))\n end = dt.datetime.now()\n print((end-start).total_seconds())\n return tr", "def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()", "def load_users():\n\n print \"Users\"\n\n User.query.delete()\n\n for row in open(\"seed_data/u.user\"):\n row = row.rstrip()\n ID, password, name, first_entry_at = row.split(\"|\")\n first_entry_at = datetime.strptime(first_entry_at, \"%m-%d-%y\")\n\n user = User(ID=ID, password=password, name=name, first_entry_at=first_entry_at)\n\n db.session.add(user)\n\n db.session.commit()", 
"def test_wallets_get(self):\n pass", "def load_all_trades():\n trades = []\n\n kraken_trades_filename = \"data_private/kraken-trades.csv\"\n if Path(kraken_trades_filename).exists():\n print(\"Found kraken trades!\")\n trades_kraken_csv = _load_csv(kraken_trades_filename)\n trades.extend(_format_csv_from_kraken(trades_kraken_csv))\n\n polo_trades_filename = \"data_private/poloniex-trades.csv\"\n if Path(polo_trades_filename).exists():\n print(\"Found poloniex trades!\")\n trades_polo_csv = _load_csv(polo_trades_filename)\n trades.extend(_format_csv_from_poloniex(trades_polo_csv))\n\n bitstamp_trades_filename = \"data_private/bitstamp-trades.csv\"\n if Path(bitstamp_trades_filename).exists():\n print(\"Found bitstamp trades!\")\n trades_bitstamp_csv = _load_csv(bitstamp_trades_filename)\n trades.extend(_format_csv_from_bitstamp(trades_bitstamp_csv))\n\n lbtc_trades_filename = \"data_private/lbtc-trades.csv\"\n if Path(lbtc_trades_filename).exists():\n print(\"Found lbtc trades!\")\n trades_lbtc_csv = _load_csv(lbtc_trades_filename)\n trades.extend(_format_csv_from_lbtc(trades_lbtc_csv))\n\n return list(sorted(trades, key=lambda t: t[\"time\"]))", "def backup_all(users_dict: dict, start_idx: int, num_checked_emails: int)\\\n -> None:\n backup_users_dict(users_dict)\n backup_int_in_fname(start_idx, START_IDX_FNAME)\n backup_int_in_fname(num_checked_emails, CHECKED_EMAILS_FNAME)", "def load(self) -> bool:\n data = self.wallet.storage.get(\"slp\")\n try:\n assert isinstance(data, dict), \"missing or invalid 'slp' dictionary\"\n ver = data[\"version\"]\n assert ver == self.DATA_VERSION, (\n f\"incompatible or missing slp data version '{ver}', expected\"\n f\" '{self.DATA_VERSION}'\"\n )\n # dict of txid -> int\n self.validity = {k.lower(): int(v) for k, v in data[\"validity\"].items()}\n # dict of \"token_id_hex\" -> dict of [\"txo_name\"] -> qty (int)\n self.token_quantities = {\n k.lower(): {vv0.lower(): int(vv1) for vv0, vv1 in v}\n for k, v in data[\"token_quantities\"].items()\n }\n # build the mapping of prevouthash:n (str) -> token_id_hex (str) from self.token_quantities\n self.txo_token_id = {}\n for token_id_hex, txo_dict in self.token_quantities.items():\n for txo in txo_dict:\n self.txo_token_id[txo] = token_id_hex\n # dict of Address -> set of txo_name\n self.txo_byaddr = {\n address.Address.from_string(k): {vv.lower() for vv in v}\n for k, v in data[\"txo_byaddr\"].items()\n }\n self.need_rebuild = False\n except (\n ValueError,\n TypeError,\n AttributeError,\n address.AddressError,\n AssertionError,\n KeyError,\n ) as e:\n # Note: We want TypeError/AttributeError/KeyError raised above on\n # missing keys since that indicates data inconsistency, hence why\n # the lookups above do not use .get() (thus ensuring the above\n # should raise on incorrect or missing data).\n self.print_error(\"Error loading slp data; will flag for rebuild:\", repr(e))\n self.clear()\n self.need_rebuild = True\n return not self.need_rebuild" ]
[ "0.5589104", "0.5487311", "0.5350113", "0.5330402", "0.52354324", "0.5202624", "0.5198788", "0.51569533", "0.5109451", "0.51038194", "0.5097838", "0.50742316", "0.5059578", "0.5058267", "0.50488466", "0.5033946", "0.49946192", "0.49710378", "0.49665785", "0.495143", "0.49480507", "0.49418694", "0.49365112", "0.4924291", "0.49235362", "0.4905069", "0.4889927", "0.48813668", "0.48793575", "0.48752758" ]
0.5536482
1
Validate if set of words is valid If words is None or is not valid we raise error
def validate_words(self): if not self.words or not self.mnemonic.check(self.words): raise InvalidWords
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self):\n\n if self.words_list is None:\n raise Exception('Words list is not set')", "def is_valid_words(args, skip=False):\n if is_valid_file_and_directory(args) or skip:\n if args.words is not None:\n return True\n return False", "def validate(self, word):\n\n # Strip unwanted characters\n clean = re.sub(r\"[^a-zA-Z- ]+\", \"\", word).strip().lower()\n if len(clean) <= 1:\n return None # Word too short\n\n # Generate candidates for possible compound words\n # \"valid\" -> [\"valid\"]\n # \"cul de sac\" -> [\"cul-de-sac\", \"culdesac\"]\n # \"top-hat\" -> [\"top-hat\", \"tophat\"]\n candidates = []\n if \" \" in clean:\n candidates.append(re.sub(r\" +\", \"-\", clean))\n candidates.append(re.sub(r\" +\", \"\", clean))\n else:\n candidates.append(clean)\n if \"-\" in clean:\n candidates.append(re.sub(r\"-+\", \"\", clean))\n for cand in candidates:\n if cand in self.vectors:\n return cand # Return first word that is in model\n return None # Could not find valid word", "def _validate_word(self, word):\n return type(word) == type('a') and set(self._letters) == set(list(word))", "def allow(self, words):\n\t\tallowed = [word for word in words if re.match('^[A-Za-z0-9\\.\\,\\:\\;\\!\\?\\(\\)\\'\\-\\$\\@\\%\\\"]+$', word) is not None]\t\t\n\t\treturn allowed", "def badwords_detector(value):\n for badword in BADWORDS:\n if badword.lower() in value.lower():\n raise ValidationError('La palabra {} no está permitida'.format(badword))\n\n #Si todo va OK, devuelvo True\n return True", "def validate(self, word):\n\n return self.valid_word(word)", "def validate(self, word):\n\n return self.valid_word(word)", "def isWordSet(self):\n return len(self.getWord()) != 0", "def test_non_required_validation(self):\r\n Text().validate('')\r\n Text().validate(None)", "def is_valid(line):\n word_list = line.split()\n init_word_count = len(word_list)\n\n # a set to hold the words\n non_dup_words = set(word_list)\n non_dup_count = len(non_dup_words)\n\n return (init_word_count == non_dup_count)", "def validateWord(sValue, cchMin = 1, cchMax = 64, asValid = None, aoNilValues = tuple([None, '']), fAllowNull = True):\n if sValue in aoNilValues:\n return (sValue, None if fAllowNull else 'Mandatory.');\n\n if re.search('[^a-zA-Z0-9_-]', sValue) is not None:\n sError = 'Single word ([a-zA-Z0-9_-]), please.';\n elif cchMin is not None and len(sValue) < cchMin:\n sError = 'Too short, min %s chars' % (cchMin,);\n elif cchMax is not None and len(sValue) > cchMax:\n sError = 'Too long, max %s chars' % (cchMax,);\n elif asValid is not None and sValue not in asValid:\n sError = 'Invalid value \"%s\", must be one of: %s' % (sValue, asValid);\n else:\n sError = None;\n return (sValue, sError);", "def test_validate_dupes(self):\n dupes, msg = validate_words([\"foo\", \"bar\", \"baz\", \"foo\"])\n self.assertEqual([\"foo\"], dupes)", "def validate_word(self, word, normalize=True):\n return not self._segs(word, include_valid=False, include_invalid=True, normalize=normalize)", "def is_valid(text):\n return is_all_word_segment_in_text(WORDS, text)", "def is_valid_word(word, hand, word_list):\n failure=True\n word=word.lower()\n if word not in word_list:\n failure=False\n for i in word:\n w=hand.get(i,0)\n if w==0:\n failure=False\n break\n return failure", "def valid_target(start, target, words):\r\n if target.isalpha(): # target word must be alphabetic\r\n if len(start) == len(target): # target word must be same size as start word\r\n if start != target: # target and start words must be different\r\n if target in words: # 
target word must be in the list of words\r\n return \"0\"\r\n else:\r\n return \"Target word not in list of words....please reenter\"\r\n else:\r\n return \"Target word must be different from Start word....please reenter\"\r\n else:\r\n return \"Target word must be same length as Start word....please reenter\"\r\n else:\r\n return \"Target word must contain only letters....please reenter\"", "def _validate_set(val):\n if not isinstance(val, set):\n raise ValueError(\"Passed value {} is not a set\".format(val))\n if not all([isinstance(char, str) for char in val]):\n raise ValueError(\"Passed overrides of non-string to overrides\")\n return val", "def _validate(self):\n for p in self.parameters:\n #Check for missing required parameters:\n if p.is_required and not(p.is_set):\n raise ValueError(\"Parameter %s is not set.\" \\\n % p.names[-1])\n #Also repeat the parameter validation here, just in case?", "def word_length_check(self):\r\n \r\n for word in self.all_words:\r\n if len(word) == len(self.best_guess):\r\n self.valid_words.add(word)", "def add_words(self, words: List[str], **kwargs: Any) -> None:\n kwargs.setdefault('check_density', False)\n kwargs.setdefault('check_count', False)\n for word in words:\n with suppress(WordLengthError):\n self.add_word(word, **kwargs)", "def valid(phrase):\n words = []\n series_of_words = phrase.split(' ')\n words.append(series_of_words.pop())\n for word in series_of_words:\n if word in words:\n return False\n words.append(word)\n return True", "def validate_vocabulary(value):\n if queryUtility(IVocabularyFactory, value) is None:\n raise Invalid(_(u\"Not a vocabulary: %s\") % value)\n return True", "def check_inputs(self, inputs):\n if self.debug:\n print(\"Checking inputs\")\n result = True\n for _input in inputs:\n if \"word_\" in _input and inputs[_input] == \"\":\n result = False\n elif \"idiom_\" in _input and inputs[_input] == \"\":\n if \"list\" not in _input:\n result = False\n return result", "def check_words(dictionary_, start_word, stop_word):\n if dictionary_.is_real_word(start_word) is False:\n print(\"Word {} not found in the dictionary\".format(start_word))\n return False\n if dictionary_.is_real_word(stop_word) is False:\n print(\"Word {} not found in the dictionary\".format(stop_word))\n return False\n return True", "def is_validword(word, hand, word_list1):\n # TO DO ... 
<-- Remove this comment when you code this function\n word_list = []\n cnt_1 = 0\n for i in word:\n word_list += i.split(\",\")\n for i in word_list:\n if i in hand.keys():\n cnt_1 += 1\n if cnt_1 == len(word) and word in word_list1:\n score = get_word_score(word, n_num)\n update_hand(hand, word)\n else:\n print(\"Invalid Word\")", "def test_forbidden_words(self) -> None:\n pad_open: bool = False\n words: List[Word] = self.report.get_words()\n forbidden_words: List[Word] = []\n last_error: bool = False\n\n for word in words:\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n if (word.text in self.rules.forbidden_words) or any(\n [b in self.rules.forbidden_words for b in word.baseform]\n ):\n forbidden_words.append(word)\n last_error = True\n continue\n if last_error:\n last_error = False\n combo = \" \".join([w.text for w in forbidden_words])\n start, _ = self.report.get_word_postion(forbidden_words[0])\n _, end = self.report.get_word_postion(forbidden_words[-1])\n self.add_error(\n f\"Ordet {combo} får endast förekomma i citat.\", position=(start,end)\n )", "def validate(self) -> None:\n names: set[str] = set()\n for name in (\n *(i.name for i in self.typed_dicts),\n *(i.name for i in self.literals),\n *(i.name for i in self.waiters),\n *(i.name for i in self.paginators),\n *(self.service_resource.get_all_names() if self.service_resource else []),\n ):\n if is_reserved(name):\n raise ValueError(f\"{name} is a reserved keyword\")\n if name in names:\n for typed_dict in self.typed_dicts:\n if typed_dict.name == name:\n self.logger.warning(\n f\"{typed_dict}: {[c.render() for c in typed_dict.children]}\"\n )\n raise ValueError(f\"Duplicate name {name}\")\n names.add(name)", "def validate(self):\n\n if self.indexer_table is None:\n raise Exception('Indexed words table is not set')", "def test_validate_lens(self):\n lens, msg = validate_words([\"foo\", \"john\", \"bar\", \"baz\"])\n self.assertEqual([\"john\"], lens)" ]
[ "0.76675195", "0.6575766", "0.6496483", "0.6485126", "0.6357611", "0.62600315", "0.6259116", "0.6259116", "0.6186593", "0.61348206", "0.61265373", "0.610123", "0.6101203", "0.60853904", "0.6045598", "0.60430956", "0.5989423", "0.5966042", "0.5959515", "0.5955842", "0.5943565", "0.5942606", "0.59318626", "0.5855", "0.5853591", "0.58198094", "0.58183736", "0.5740878", "0.57394826", "0.57362616" ]
0.75707555
1
Total cost of a given set s
def total_cost(self): return np.einsum('i->', self.c[self.s])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def costFun(self, S, x):", "def calculate_total_cost(state):\r\n return state.cost()", "def cost(self) -> float:", "def calculate_total_cost(state):\n pass", "def total_cost(clusters):\n inter = 0\n intra = 0\n dm = 0\n for clst in clusters:\n # print clst.label, \"has cost: \", str(clst.inter_cost), str(clst.intra_cost), str(clst.dm_cost)\n inter += clst.inter_cost\n intra += clst.intra_cost\n dm += clst.dm_cost\n total = inter + intra + dm\n #iic = inter + intra\n #print \"inter \" + str(inter) + \" intra \" + str(intra) + \" dm \" + str(dm) + \" total \" + str(total) + \" iic \" + str(iic)\n print str(inter) + \"\\t\" + str(intra) + \"\\t\" + str(dm) + \"\\t\" + str(total) # + \" in \" + str(inr)\n return inter, intra, dm, total", "def tablecost(self):\n subtotal_getter = operator.attrgetter(\"subtotal\")\n\n cost = 0.0\n\n cost += sum(map(subtotal_getter, self.materials))\n cost += sum(map(subtotal_getter, self.processes))\n cost += sum(map(subtotal_getter, self.fasteners))\n cost += sum(map(subtotal_getter, self.toolings))\n\n return cost", "def calculate_cost(data, centers, clusters):\n total = 0\n for i in range(len(centers)):\n total = total + np.sum(data[centers[i]][clusters[i]]) \n return total", "def storage_operating_costs_rule(_m, y, s):\r\n\r\n return sum(m.C_MC[g, y] * m.p_out[g, y, s, t] for g in m.G_STORAGE for t in m.T)", "def sumSet(weightedSet):\n\tsum = 0\n\tfor example in weightedSet:\n\t\tsum += example.weight\n\treturn sum", "def calculate_shares_cost_sum(dataset):\n cost_sum = 0\n for data in dataset:\n cost_sum += data[1]\n return cost_sum", "def intra_cost(points, cluster):\n def _p2p(point):\n _freq_sum = 0\n for pt in points:\n if point != pt and pt not in cluster.points:\n _freq_sum += point.frequency(pt)\n return _freq_sum\n return int(sum(map(_p2p, cluster.points)))", "def cost_total(X, cost_weights=(1.0, 1.0, 1.0)):\n return cost_weights[0] * cost_distance(X) + \\\n cost_weights[1] * cost_same_team_by_distance(X) + \\\n cost_weights[2] * cost_previous_neighbour_by_distance(X, normalize=True)", "def cost(foods, foods_used):\n cost = 0.00\n for i, count in foods_used.items():\n cost += (foods[i]['serving_cost'] * count)\n return cost", "def _c2c_cost(sclst, eclst):\n def _c2c(point):\n _c_sum = 0\n for pt in eclst.points:\n _c_sum += point.frequency(pt)\n return _c_sum\n return int(sum(map(_c2c, sclst.points)))", "def calculate_cost(self, medoids, clusters):\n cost = 0.0\n for i in range(0, len(medoids)):\n for j in range(0, len(clusters[i])):\n cost += distance.sqeuclidean(medoids[i], clusters[i][j])\n return cost\n pass", "def calculateCosts(self):\n self.costs = 0\n for house in self.houses:\n if not house.distance == 1000:\n self.costs += house.distance * 9\n for battery in self.batteries:\n self.costs += battery.costs\n return self.costs", "def cost(self):\n\t\treturn self.g + self.h", "def scenario_cost_rule(_m, y, s):\r\n\r\n return m.OP_T[y, s] + m.OP_H[y, s] + m.OP_W[y, s] + m.OP_S[y, s] + m.OP_Q[y, s] + m.OP_L[y, s]", "def calculateCost(self,sol,weights):\n\t\treturn sum([x.value*y if x != None else 0 \\\n\t\t\t\t\tfor x,y in zip(sol,weights)])", "def total_cost(self):\r\n return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) # pylint: disable=E1101\r", "def get_expected_cost(self):", "def inter_cost(cluster):\n def _p2p(point):\n _freq_sum = 0\n for pt in cluster.points:\n if point != pt:\n _freq_sum += point.frequency(pt)\n return _freq_sum\n\n return int(sum(map(_p2p, cluster.points)))", "def _calculate_costs(self):\n 
cost = 0\n cost += self._cost_route_fine()\n cost += self._cost_petrol()\n cost += self._cost_wage()\n cost += self._cost_refueling()\n cost += self._cost_caught_by_police()\n cost += self._cost_vehicle_malfunction()\n return cost", "def calculate_cost(self):\n number_collisions = self.get_collisions()\n cs = dict(\n number_collisions=number_collisions,\n cost_collisions=number_collisions\n )\n # sum all costs in one total cost\n cs['cost'] = sum(v for k, v in cs.items() if k.startswith('cost_'))\n\n return cs", "def solar_operating_costs_rule(_m, y, s):\r\n\r\n # Cost for existing solar units\r\n existing = sum(m.C_MC[g, y] * m.p[g, y, s, t] for g in m.G_E_SOLAR for t in m.T)\r\n\r\n # Cost for candidate solar units\r\n candidate = sum((m.C_MC[g, y] - (m.baseline[y] * m.permit_price[y])) * m.p[g, y, s, t]\r\n for g in m.G_C_SOLAR for t in m.T)\r\n\r\n return existing + candidate", "def thermal_operating_costs_rule(_m, y, s):\r\n\r\n return sum((m.C_MC[g, y] + ((m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y])) * m.p[g, y, s, t]\r\n for g in m.G_THERM for t in m.T)", "def total_cost(self, system=None):\n system = system or self.system()\n if system == 'grid':\n cost = self['system (grid)']['internal system nodal cost']\n else:\n cost = self['system (%s)' % system]['system nodal cost']\n cost = float(cost)\n return cost", "def som(getallenlijst):\r\n total = sum(getallenlijst)\r\n return total", "def unitcost(self):\n cost = self.tablecost\n\n for component, quantity in self.components.items():\n cost += component.unitcost * quantity\n\n return cost", "def hydro_operating_costs_rule(_m, y, s):\r\n\r\n return sum(m.C_MC[g, y] * m.p[g, y, s, t] for g in m.G_E_HYDRO for t in m.T)" ]
[ "0.69436836", "0.69279075", "0.6916112", "0.68575907", "0.67779595", "0.6609439", "0.66088957", "0.65838075", "0.65287316", "0.6502622", "0.63530636", "0.6344556", "0.6336414", "0.63225985", "0.629074", "0.625908", "0.625543", "0.62157536", "0.61710256", "0.6160492", "0.61533785", "0.6152184", "0.6147194", "0.6130239", "0.60698724", "0.6011189", "0.5993808", "0.59894073", "0.5978628", "0.59584117" ]
0.74787
0
Reset s, the selected columns
def reset_s(self): self.s = np.copy(self.f_uniq) # (current) solution, selected column
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n for col in self._columns:\n col.reset()\n\n self._next_column = 0\n self._columns = []", "def clear_columns(self):\n self._columns = []\n return self", "def clear(self):\n for col in self.cols:\n self.data[col] = []\n return self", "def reset(self):\n self.table[:, :] = 0\n self.counts[:] = 0\n self.names = []\n self.hashesperid.resize(0)\n self.dirty = True", "def reset_state(self):\n self.s = np.copy(self.s_i)", "def reset_columns(self):\n\n reset_cols = [i for i in self.__cols if i in self.__df_timings.columns]\n self.__df_timings = self.__df_timings.loc[:, reset_cols]\n return", "def reset_state(self):\n for row in range(len(self.state)):\n for column in range(len(self.state[row])):\n self.state[row][column] = None", "def set_selected_cols(self, cols):\n if cols is not None:\n assert len(\n cols\n ) == self.nwann, \"Number of columns should be equal to number of Wannier functions\"\n self.cols = cols\n if self.sort_cols:\n self.cols = np.sort(self.cols)", "def reset(self):\n self.selection_bounds = None\n self.selection = []\n for c in self.callbacks[\"reset_data\"]:\n c()\n if self.context is not None and self.context.doc is not None:\n self.context.doc.add_next_tick_callback(self.update_source)", "def clear(self):\n for key in self.__columns:\n self.__widths[key] = 0\n self.__data = []\n self.__selectedRow = -1\n self.__formatString = \"\"\n self._window.clear()\n self.drawBorder()", "def reset_S(self):\n self.S = [self._one_S(self.D[n]) for n in range(self.L + 1)]", "def setAllColumns(self, newAllColumns):\n \n pass", "def _reset_state(self):\n super(Cursor, self)._reset_state()\n self._nextUri = None\n self._columns = OrderedDict()\n self._rownumber = 0\n self._operation = None\n self._actual_cols = OrderedDict()", "def reset(self):\n self.source_data = self.get_dict_from_range(None, None)\n self.selection_bounds = None\n self.selection = []\n for c in self.callbacks[\"reset_data\"]:\n c()\n if self.context is not None:\n self.context.doc.add_next_tick_callback(self.update_source)", "def clear_previous_selections(self):\n self.headers = []\n self.filename = ''\n self.x_axis = ''\n self.y_axis = ''\n self.delim = ''\n self.non_numeric_x_axis = False\n self.count_desired = False", "def clear_rows(self):\n ...", "def reset(self):\n self.rows = deepcopy(self.empty_rows)\n self._update_max_row_info()", "def reset(self):\n self.__sets = []\n self._computed = False", "def reset(self):\r\n\t\tself.index = 0", "def reset(self):\n self.liidx = 0\n self.clidx = 0", "def reset_selections():\n return True", "def reset(self):\n self._idx = 0", "def reset(self, board):", "def clear_sel(self):\n run(['ipmitool', 'sel', 'clear'], stdout=DEVNULL, stderr=DEVNULL)", "def reset(self):\n for rows in range(self.height):\n for col in range(self.width):\n self.slots[rows][col] = ' '", "def _reset(self):\n self.description = None\n self.rowcount = -1\n self.colcount = -1\n self._close_result_set()", "def reset(self):\n self._last_item = None\n self._connected_items = []\n\n self._title_label.deleteLater()\n\n for item in self._items:\n item.deleteLater()\n\n for i in range(self._column_span):\n self._grid.setColumnStretch(self._column_id + i, 0)\n\n self._items = []\n self._row_index = 0", "def _reset_stored(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n self._setted = False\n self.ks = None\n self.iss = [0]", "def reset_filters():\n logger.info(\"reset filters\")\n global filter_item\n filter_item = -1\n filter_topics_table.view.filters = 
[IndexFilter()]\n filter_custom_table.view.filters = [IndexFilter()]\n filter_label.text = \"\"", "def cmdClear(self, args):\n for (x, y) in self.genRowColCell(args):\n self.state[y][x] = 0\n self.matrix.color(x, y, sequence[0])" ]
[ "0.69522244", "0.6536586", "0.6393485", "0.63491374", "0.61455876", "0.60716295", "0.6006219", "0.59355867", "0.58931756", "0.5868809", "0.5864084", "0.58631307", "0.58542395", "0.58395255", "0.5830489", "0.57751304", "0.5765056", "0.5763039", "0.57629293", "0.5731909", "0.5672633", "0.5670909", "0.56665844", "0.5627279", "0.56253546", "0.5611746", "0.5593079", "0.55922055", "0.55852014", "0.55774504" ]
0.7383071
0
Fix the unique columns that have to be in the minimal set
def _fix_uniq_col(self): # subgradient; for two boolean arrays, multiplication seems to be the best way # (equivalent to logical_and) n_covered_col = self.a_csr.dot(np.ones(self.ncols)) ifix = np.zeros(self.ncols, dtype=bool) if (np.count_nonzero(n_covered_col) != self.mrows): raise ValueError("There are uncovered rows! Please check your input!") if (np.any(n_covered_col==1)): inonzero = self.a_csr[n_covered_col==1,:].nonzero() ifix[inonzero[1]] = True return ifix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique(self):\n # variables for uniques \n self._currentSet = 1\n self._uniqueValue = {}\n\n pd = self._dataTable\n for col in pd:\n arr = pd[col].unique()\n for i in arr:\n unique_entry = ((col,i),)\n self._uniqueValue[unique_entry] = 0 \n\n self._sets[self._currentSet] = self._uniqueValue", "def unique_cols(self):\n return list(set([coord[1] for coord in self.landscape]))", "def ensure_sparse_cols(self,max_density,remove_lowest=True):\n if max_density >= 1:\n max_nnz = int(max_density)\n else:\n max_nnz = int(max_density*self.shape[0])\n for j in range(self.shape[1]):\n col = self.fast_get_col(j)\n excess = col.nnz - max_nnz\n if excess > 0:\n if remove_lowest:\n zero_entries = np.argsort(col.data)[:excess]\n else:\n zero_entries = random.sample(range(col.nnz),excess)\n col.data[zero_entries] = 0\n self.fast_update_col(j,col.data)", "def assert_unique_cols_unique(self, df):\n assert not df.duplicated(self.unique_cols).any()", "def __clean_df(self):\n self.__convert_min()", "def combine_columns(allowed_columns):\n\n v_columns = [v for v in allowed_columns if v in df.columns]\n v_columns.sort()\n for i in range(1, len(v_columns)):\n df[v_columns[0]] = df[v_columns[0]].fillna(df[v_columns[i]])\n df.drop(v_columns[i], 1, inplace=True)\n return v_columns[0]", "def remove_insertion_columns(self):\n cols = self.get_insertion_columns()\n s = []\n a = 0\n for b in cols:\n if b > a:\n s.append((a, b))\n a = b + 1\n s.append((a, len(self.col_labels)))\n for name, seq in list(self.items()):\n news = []\n for c in s:\n news.append(seq[c[0]:c[1]])\n self[name] = \"\".join(news)", "def strip_static_cols(df):\n for col in df.columns:\n if len((df[col]).unique()) == 1:\n df.drop(columns=[col], inplace=True)\n return df", "def zero_one_card(df):\n unique_values = defaultdict()\n for col in df.columns:\n if df[col].nunique() < 2:\n unique_values[col] = df[col].nunique()\n if len(unique_values) > 0:\n printmd(str(\"* Columns: *\"+', '.join(list(unique_values.keys()))+\"* have less than two different values\"))\n for col in unique_values.keys():\n printmd(str('* *' + col + \"* has \" + str(df[col].nunique()) + ' differents values :' + str(df[col].unique())))\n else:\n printmd(\"* No columns have less than 2 different values\")", "def _set_unique_and_null_vals(self):\n self.unique_vals = {}\n \n df_col = self.df[self.col]\n u_vals = pandas.unique( df_col[ df_col.notnull() ] )\n \n for val in u_vals:\n self.unique_vals[val] = np.where( df_col==val)[0]\n \n null_inds = np.where(self.df.isnull()[self.col]) [0]\n if null_inds.size:\n self.unique_vals['NULL__'] = null_inds", "def drop_one_elem_columns(self, df):\n df_ = df.copy()\n\n # Incldue columns in dataframe\n include_idx = []\n for i in df_.columns:\n len_unique = df_[i].dropna().unique().size\n if len_unique > 1:\n include_idx.append(i)\n\n df_ = df_[include_idx]\n return df_", "def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)", "def make_canonical(self):\n n = self.cardinality\n minuc = [self.uc[i] for i in range(n)]\n perms = [[0]+p+[n-1] for p in permutations(1,n-1)]\n for p in perms:\n puc = range(n)\n for i in range(n):\n puc[p[i]] = sorted([p[y] for y in self.uc[i]])\n if puc < minuc: minuc = puc\n return minuc", "def columns_to_fix(df):\n return [col for col in df.columns.values if any([k in col and v in col for k, v in symmetric_dihedrals.items()])]", "def clear_columns(prefixlist,datas):\n\n ori_columns=datas.columns.tolist()\n ccc=rem_str(prefixlist,ori_columns)\n 
ccc=rem_str('_',ccc)\n ccc=[c.lower() for c in ccc]\n \n d = {key: value for (key, value) in zip(ori_columns,ccc)}\n datas.rename(columns=d,inplace=True)\n\n u, i = np.unique(datas.columns, return_index=True)\n y=u[np.argsort(i)] \n \n r=[datas.columns.tolist().index(rr)for rr in y]\n\n return datas.iloc[:, r]", "def delete_all_gap(self):\n # pdb.set_trace()\n\n rem = set(self.get_all_gap_cols())\n subset = [x for x in range(0, self.get_length()) if x not in rem]\n self.remove_columns(set(rem))\n #_LOG.debug(\"Alignment length reduced to %d\" % len(subset))\n return subset", "def __clean_repeated_columns(self, df, column_type):\n for column in df.columns:\n if column_type in column.lower():\n # Fill main column with data from \"prefix + _\" type column names.\n df[column_type[:-1]].fillna(df[column], inplace=True)\n # Drop the \"prefix + _\" type column names.\n df.drop(column, axis=1, inplace=True)", "def minimal_clean_data_inplace(df):\n # There are some 'unknown' users in train dataset only\n unknown_data_lines = df['sexo'].isnull() & df['age'].isnull() & df['ind_empleado'].isnull() & \\\n df['fecha_alta'].isnull() & df['pais_residencia'].isnull()\n\n logging.info(\"- Number of lines with unknown data : %s\" % unknown_data_lines.sum())\n\n # Remove these users as clients\n _clients = df[unknown_data_lines]['ncodpers'].unique()\n bad_lines = df['ncodpers'].isin(_clients)\n df.drop(df[bad_lines].index, inplace=True)\n\n logging.info(\"- Number of columns with nan : %s\" % df.isnull().any().sum())\n\n # Remove accent\n df.loc[df['nomprov'] == \"CORU\\xc3\\x91A, A\", \"nomprov\"] = \"CORUNA\"\n\n unknown_cols = ['sexo',\n 'ind_empleado',\n 'pais_residencia',\n 'ult_fec_cli_1t',\n 'conyuemp',\n 'canal_entrada',\n 'nomprov',\n 'segmento',\n 'tiprel_1mes',\n 'indrel_1mes']\n # Start with cols -> replace nan with UNKNOWN\n for col in unknown_cols:\n df.loc[df[col].isnull(), col] = \"UNKNOWN\"\n\n # Set unknown renta to -99\n df.loc[df['renta'].isnull(), 'renta'] = -99\n\n # Next `fecha_alta` :\n assert df['fecha_alta'].isnull().sum() == 0, \\\n \"Need to replace nan in 'fecha_alta', count=%s\" % df['fecha_alta'].isnull().sum()\n\n # **Remove 'tipodom' and 'cod_prov' columns**\n df.drop([\"tipodom\", \"cod_prov\"], axis=1, inplace=True)\n \n # Convert 'ind_nuevo' to int\n df['ind_nuevo'] = df['ind_nuevo'].astype(int)\n \n # Remove floating point at string indrel_1mes\n df['indrel_1mes'] = df['indrel_1mes'].apply(lambda x: str(int(float(x))) if len(x) == 3 else x)\n\n if \"ind_nomina_ult1\" in df.columns and \"ind_nom_pens_ult1\" in df.columns:\n # Target labels : `ind_nomina_ult1`, `ind_nom_pens_ult1` : nan -> 0\n # I could try to fill in missing values for products by looking at previous months,\n # but since it's such a small number of values for now I'll take the cheap way out.\n df.loc[df.ind_nomina_ult1.isnull(), \"ind_nomina_ult1\"] = 0\n df.loc[df.ind_nom_pens_ult1.isnull(), \"ind_nom_pens_ult1\"] = 0\n\n # replace 'antiguedad' with the number of months between 'fecha_alta' and 'fecha_dato'\n func1 = lambda x: _to_ym_dec(to_yearmonth(x))\n func2 = lambda x: max(_to_nb_months(x), 0) \n\n v1 = df['fecha_dato'].apply(func1)\n v2 = df['fecha_alta'].apply(func1)\n v3 = (v1 - v2).apply(func2)\n df.loc[:, 'antiguedad'] = v3\n \n # Replace 'ult_fec_cli_1t' by current nb of months from fecha_dato, if negative, set to zero\n mask = df['ult_fec_cli_1t'] == 'UNKNOWN'\n df.loc[mask, 'ult_fec_cli_1t'] = df[mask]['fecha_dato']\n v1 = df['fecha_dato'].apply(func1)\n v2 = 
df['ult_fec_cli_1t'].apply(func1)\n v3 = (v1 - v2).apply(func2)\n df.loc[:, 'ult_fec_cli_1t'] = v3", "def delete_unique(self, table_name, columns):\r\n print \" ! WARNING: SQLite does not support removing unique constraints. Ignored.\"", "def normalize(column):\n value_set = set(column)\n unique_count = len(value_set)\n if unique_count == 1:\n # skip everything in this column. \n return []\n elif unique_count == 2:\n zero = list(value_set)[0]\n one = list(value_set)[1]\n normalized_column = []\n for value in column:\n normalized_column.append(1 if value == one else 0)\n return [normalized_column]\n else: \n all_values = list(value_set)\n normalized_column = []\n\n # expand into multiple columns \n for index in range(len(all_values)):\n normalized_column.append([])\n\n for value in column:\n for index in range(len(all_values)):\n normalized_column[index].append(1 if value == all_values[index] else 0)\n \n return normalized_column", "def complete_columns(training_df, valid_df):\n for c in valid_df.columns:\n if c not in training_df.columns:\n training_df[c] = 0\n for c in training_df.columns:\n if c not in valid_df.columns:\n valid_df[c] = 0\n return training_df, valid_df", "def unique_column_values(rows, column_name):\n # declare a set that guarantees no duplicates in the answer\n value_set = set()\n # for all rows, add the value of indicated column to the set\n for row in rows:\n \tvalue_set.add(row[column_name])\n return value_set", "def perm4missing(flights, col, N):\n\n return ...", "def eliminateRedundantInfo(self):\n\n allEliminated = False\n edep = self.energyDependentWidths\n for colId in range(edep.nColumns)[::-1]:\n column = edep.columns[colId]\n columnData = edep.getColumn( column.name, units='eV' )\n if len(set( columnData ) ) == 1:\n setattr( self.constantWidths, column.name, PQU.PQU( PQU.pqu_float.surmiseSignificantDigits( columnData[0] ), column.units ) )\n [d.pop(colId) for d in edep.data]\n edep.columns.pop(colId)\n for idx, col in enumerate( edep.columns ): col.index = idx #re-number\n #if edep.nColumns == 1 and edep.columns[0].name == 'energy':\n # edep.columns, edep.data = [],[] # all widths are constant\n # allEliminated = True\n return allEliminated", "def unique_columns(inval, axis=0):\n # this is a nice trick taking advantage of structed arrays where each row or column\n # is the value, so returl unique works\n # np.ascontiguousarray() is to be really sure it will work\n if axis == 0:\n val = np.ascontiguousarray(np.transpose(inval))\n else:\n val = np.ascontiguousarray(inval)\n b = val.view(np.dtype((np.void, val.dtype.itemsize * val.shape[1])))\n unique_a = np.unique(b).view(val.dtype).reshape(-1, val.shape[1])\n return unique_a", "def compare(old_dataframe, fresh_dataframe):\n combined_dataframe = pd.concat([old_dataframe, fresh_dataframe])\n combined_dataframe = combined_dataframe.reset_index(drop=True)\n\n grouped_dataframes = combined_dataframe.groupby(DataFrameRow.REQUIRED)\n\n # if there is overlap, there will be a column with length > 1\n unique_indices = [col[0] for col in grouped_dataframes.groups.values() if\n len(col) == 1]\n\n return combined_dataframe.reindex(unique_indices)", "def expand_cat_cols_rmv_all(df,exclude=None):\n cols=get_non_num_cols(df)\n for col in cols:\n if exclude==None or( not exclude == None and not col in exclude):\n vals = df[col].unique()\n for val in vals:\n new_col_name=col + \"_\" +val\n df[new_col_name]=int(df.loc[:,col].to_list()==val)\n print(col)\n del df[col]", "def nonunique_gens(df,\n key_cols=['plant_id_eia', 
'generator_id', 'report_date']):\n unique_gens = df.drop_duplicates(subset=key_cols)\n dupes = df[~df.isin(unique_gens)].dropna()\n dupes = dupes.sort_values(by=key_cols)\n return dupes", "def _rebuild_compareset(self, result, rewrapped_columns, columns):\n normalize = lambda x: x if (isinstance(x, str) or not x) else tuple(x)\n rewrapped_columns = normalize(rewrapped_columns)\n columns = normalize(columns)\n\n if rewrapped_columns == columns:\n return result # <- EXIT!\n\n missing = self._missing\n def rebuild(x):\n lookup_dict = dict(zip(rewrapped_columns, x))\n return tuple(lookup_dict.get(c, missing) for c in columns)\n return CompareSet(rebuild(x) for x in result)", "def _propagate_duplicate_cols(self, duplicate_cols):\n for duplicate in duplicate_cols:\n no_suffix = \"_\".join(duplicate.split(\"_\")[:-1])\n null_idx = self._hybrid_meta[no_suffix].isnull()\n non_null_vals = self._hybrid_meta.loc[null_idx, duplicate].values\n self._hybrid_meta.loc[null_idx, no_suffix] = non_null_vals" ]
[ "0.62135583", "0.6177121", "0.6156773", "0.60971934", "0.60081965", "0.6004404", "0.5981918", "0.5978237", "0.5898628", "0.58933026", "0.5860984", "0.58033264", "0.57526135", "0.5745792", "0.57044876", "0.5678293", "0.567165", "0.56686383", "0.56637037", "0.564481", "0.5639248", "0.56299514", "0.5628205", "0.5620778", "0.55961883", "0.5585833", "0.5558066", "0.55348176", "0.55318373", "0.55316406" ]
0.6250709
0
Subgradient step for the core problem N\S.
def subgradient(self): UB_full = self.total_cost ufull = np.copy(self.u) # Update core: possible bottleneck (a_csr, a_csc) = self.update_core() mrows = a_csr.shape[0] ncols = a_csr.shape[1] u_this = self.u[~self.f_covered] # np.einsum is 20% faster than np.sum ... UB_fixed = self.fixed_cost UB = UB_full - UB_fixed cost = self.c[~self.f] # save nsteps calculations (Lagrangian multipliers and lower bounds) u_sequence = np.zeros((mrows, self._subg_nsteps)) Lu_sequence = np.zeros(self._subg_nsteps) # update u x = np.zeros(ncols, dtype=bool) niters_max = self._subg_maxiters maxfracchange = self._subg_maxfracchange maxabschange = self._subg_maxabschange # initialization f_change = _largenumber a_change = _largenumber niters = 0 Lu_max0 = 0 while ((f_change>maxfracchange) or (a_change>maxabschange)) and (niters<niters_max): u_this = (1.0+(np.random.rand(mrows)*2.-1)*self._u_perturb)*u_this u_sequence[:,0] = u_this cost_u = cost - a_csc.dot(u_sequence[:,0]) # Lagrangian cost # next lower bound of the Lagrangian subproblem Lu_sequence[0] = np.einsum('i->', cost_u[cost_u<0])+np.einsum('i->', u_sequence[:,0]) for i in np.arange(self._subg_nsteps-1): # current solution to the Lagrangian subproblem x[0:] = False x[cost_u<0] = True # subgradient; for two boolean arrays, multiplication seems to be the best way # (equivalent to logical_and) s_u = 1. - a_csr.dot(x.astype(int)) s_u_norm = np.einsum('i,i',s_u,s_u) # subgradient's norm squared # Update # next Lagrangian multiplier u_temp = u_sequence[:,i]+self._stepsize*(UB - Lu_sequence[i])/s_u_norm*s_u u_temp[u_temp<0] = 0 u_sequence[:,i+1] = u_temp cost_u = cost - a_csc.dot(u_sequence[:,i+1]) # Lagrangian cost # next lower bound of the Lagrangian subproblem Lu_sequence[i+1] = np.einsum('i->', cost_u[cost_u<0])+np.einsum('i->', u_sequence[:,i+1]) #print(UB_full, UB, Lu_sequence[i+1]) # Check the last nadaptive steps and see if the step size needs to be adapted if (np.mod(i+1,self._subg_nadaptive)==0): Lu_max_adapt = np.amax(Lu_sequence[i+1-self._subg_nadaptive:i+1]) Lu_min_adapt = np.amin(Lu_sequence[i+1-self._subg_nadaptive:i+1]) if (Lu_max_adapt <= 0.): Lu_max_adapt = _smallnumber f_change_adapt = (Lu_max_adapt-Lu_min_adapt)/np.fabs(Lu_max_adapt) if f_change_adapt > self._max_adapt: self._stepsize = self._stepsize*0.5 elif (f_change_adapt < self._min_adapt) and (self._stepsize<1.5): self._stepsize = self._stepsize*1.5 # swap the last multiplier with the optimal one i_optimal = np.argmax(Lu_sequence[i+1-self._subg_nadaptive:i+1]) if (i_optimal != (self._subg_nadaptive-1)): u_temp = u_sequence[:,i] u_sequence[:,i] = u_sequence[:,i+1-self._subg_nadaptive+i_optimal] u_sequence[:,i+1-self._subg_nadaptive+i_optimal] = u_temp Lu_sequence[i+1-self._subg_nadaptive+i_optimal] = Lu_sequence[i] Lu_sequence[i] = Lu_max_adapt i_optimal = np.argmax(Lu_sequence) Lu_max = Lu_sequence[i_optimal] u_this = u_sequence[:,i_optimal] niters = niters + 1 a_change = Lu_max - Lu_max0 f_change = a_change/np.fabs(Lu_max) Lu_max0 = Lu_max # Just a copy. Not the reference (It's a number object) # save current u_this??? 
if (niters == niters_max): warnings.warn("Iteration in subgradient reaches maximum = {0}".format(niters)) # update multipliers self.u[~self.f_covered] = u_this # return the last nsteps multipliers # save nsteps calculations (Lagrangian multipliers and lower bounds) u_sequence_full = np.zeros((self.mrows, self._subg_nsteps)) Lu_sequence_full = np.zeros(self._subg_nsteps) u_sequence_full[self.f_covered,:] = self.u[self.f_covered][:, np.newaxis] u_sequence_full[~self.f_covered,:] = u_sequence Lu_sequence_full = Lu_sequence + self.fixed_cost return (u_sequence_full, Lu_sequence_full)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_subgradient(w, data):\n x, = data\n return -x / (w @ x)", "def compute_subgradient(w, data):\n x, y = data\n return -x if w @ x < y else x", "def gradient(self, x):\n pass", "def gradFun(self, S, x):", "def gradient_step(self):\n n = 3 #Granularity of line search\n grad = self.gradient()\n #grad = grad/np.linalg.norm(grad, 2)\n W = project(self.W[-1] + grad)\n A = np.linspace(0., 1., n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. - a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def _Take_subdiff_step(x0, y0, D, dt, r_c, nsubsteps=100):\n dt_prim = dt / nsubsteps\n for i in range(nsubsteps):\n x1, y1 = (\n x0 + np.random.normal(0, np.sqrt(2 * D * dt_prim)),\n y0 + np.random.normal(0, np.sqrt(2 * D * dt_prim)),\n )\n if np.sqrt(x1 ** 2 + y1 ** 2) < r_c:\n x0, y0 = x1, y1\n return x1, y1", "def gradient(f, x, s=_DEFAULT_STEP):\n x = np.asarray(x)\n n = len(x)\n e = np.eye(n)\n\n forw = np.zeros(n)\n for i in range(n):\n forw[i] = f(x + s*e[i])\n\n g = (forw - f(x)) / s\n return g", "def gradient_step(self):\n n = 10 #Granularity of line search\n grad = self.gradient()\n W = project(self.W[-1] + grad)\n A = np.linspace(0., self.alpha, n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. - a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def disconnected_grad(x):\n return disconnected_grad_(x)", "def Step_Substrates(S,Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NC,qana,qcat,dt,Vc):\n\tH = S[0]\n\tC = S[1]\n\tN = S[2]\n\tG = S[3]\n\n\tnH = H + (QH*(Hinf-H)+(qcat*Catabolism[0]+qana*Anabolism[0])*NC)*dt\n\tnC = C + (QC*(Cinf-C)+(qcat*Catabolism[1]+qana*Anabolism[1])*NC)*dt\n\tnN = N + (QN + (qcat*Catabolism[2]+qana*Anabolism[2])*NC)*dt\n\tnG = G + (QG*(Ginf-G)+(qcat*Catabolism[3]+qana*Anabolism[3])*NC)*dt\n\n\tnS = np.array([nH,nC,nN,nG])\n\tnS[np.where(nS <= 1e-100)] = 1e-100\n\n\treturn(nS)", "def step_linear_double(step):\n return step * 2", "def compute_gradient(self, function, arguments):", "def gradient(self, x):\n return 0.0", "def compute_gradient(self, input, error):\n raise NotImplementedError()", "def gradient(self,i,f):\n\n diff = self.points[f, :] - self.points[i, :]\n gradient = diff[1]/diff[0]\n\n return gradient", "def grad(self, w):\n subgradient = self.alpha * np.sign(w)\n \n # Insert 0 for bias term.\n return np.insert(subgradient, 0, 0, axis=0)", "def backward(self):\n gradient = blah\n return gradient", "def backward(self):\n gradient = blah\n return gradient", "def _backward(self):\n if self.units[0].value > 0:\n self.units[0].gradient += 1 * self.utop.gradient\n else:\n self.units[0].gradient += 0 * self.utop.gradient", "def test_gradient_convergence(self):\n pass", "def gradient(self):\n return NotImplemented", "def sgd_step(df, alpha, prev_beta, xy_i):\n x_i, y_i = xy_i\n gradient = df(x_i, y_i, prev_beta)\n return [beta_j + alpha * df_j\n for beta_j, df_j in zip(prev_beta, gradient)]", "def compute_gradient_minibatch(self, minibatch):\r\n \r\n # YOUR CODE HERE\r\n start_point = (minibatch-1) * self.MINIBATCH_SIZE\r\n end_point = minibatch * self.MINIBATCH_SIZE\r\n\r\n self.compute_gradient_for_subset(start_point, end_point)", "def TDGradientFunction(Prof,x,Trx,rb_spec,abs_spec,dr,inu0,bsrMult,base_T,base_P,r0,lam=[0,0,0,0,0,0]): \n \n iR = Prof['WV Online'].size # range index for a profile into 1D x 
array\n x2 = np.reshape(x,(iR+1,6))\n xK = x2[0,:] # constants [HSRL Mol HSRL Comb, WV On, WV Off, O2 On ,O2 Off]\n xS = x2[1:,:] # state vector [T, nWV, BSR, phi_HSRL, phi_WV, phi_O2]\n \n grad2 = np.zeros(x2.shape) \n \n #N,dNdB,dNdT = HSRLDerivative(T,BSR,phi,rb_spec,Trx,inu0,K,base_T,base_P)\n HSRL_mol,dHmdB,dHmdT = HSRLDerivative(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Mol'],inu0['HSRL'],xK[0],base_T,base_P)\n HSRL_comb,dHcdB,dHcdT = HSRLDerivative(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Comb'],inu0['HSRL'],xK[1],base_T,base_P)\n \n # N,dNdB,dNdnWV,dNdT = WVDIALDerivative(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n WV_on,dWVndB,dWVndnWV,dWVndT = WVDIALDerivative(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Online'],abs_spec['WV Online'],Trx['WV Online'],inu0['WV Online'],xK[2],base_T,base_P,dr,r0)\n WV_off,dWVfdB,dWVfdnWV,dWVfdT = WVDIALDerivative(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Offline'],abs_spec['WV Offline'],Trx['WV Offline'],inu0['WV Offline'],xK[3],base_T,base_P,dr,r0) \n \n # N,dNdB,dNdnWV,dNdT = O2DIALDerivative(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n O2_on,dO2ndB,dO2ndnWV,dO2ndT = O2DIALDerivative(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Online'],abs_spec['O2 Online'],Trx['O2 Online'],inu0['O2 Online'],xK[4],base_T,base_P,dr,r0)\n O2_off,dO2fdB,dO2fdnWV,dO2fdT = O2DIALDerivative(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Offline'],abs_spec['O2 Offline'],Trx['O2 Offline'],inu0['O2 Offline'],xK[5],base_T,base_P,dr,r0)\n \n# HSRLModel,dHSdB,dHSdT = HSRLProfileRatioDeriv(xS[:,0],P,xS[:,2], \\\n# Trx['HSRL Mol'],Trx['HSRL Comb'], \\\n# rb_spec['HSRL'],inu0['HSRL'],GainRatio=xK[0])\n#\n# WVModel,dWVdB,dWVdnWV,dWVdT = WaterVaporProfileRatioDeriv(xS[:,0],P,xS[:,1],xS[:,2]*bsrMult['WV'],\n# Trx['WV Online'], Trx['WV Offline'], \\\n# rb_spec['WV Online'],rb_spec['WV Offline'], \\\n# abs_spec['WV Online'],abs_spec['WV Offline'],dr, \\\n# inu0['WV Online'],inu0['WV Offline'],GainRatio=xK[1])\n# \n# O2Model,dO2dB,dO2dnWV,dO2dT = OxygenProfileRatioDeriv(xS[:,0],P,xS[:,1],xS[:,2]*bsrMult['O2'],\n# Trx['O2 Online'], Trx['O2 Offline'], \\\n# rb_spec['O2 Online'],rb_spec['O2 Offline'], \\\n# abs_spec['O2 Online'],abs_spec['O2 Offline'],dr, \\\n# inu0['O2 Online'],inu0['O2 Offline'],GainRatio=xK[2])\n \n HSRLmolBase = 1-(Prof['HSRL Mol'])/(HSRL_mol+Prof['HSRL Mol BG'])\n HSRLcombBase = 1-(Prof['HSRL Comb'])/(HSRL_comb+Prof['HSRL Comb BG'])\n WVonBase = 1-(Prof['WV Online'])/(WV_on+Prof['WV Online BG'])\n WVoffBase = 1-(Prof['WV Offline'])/(WV_off+Prof['WV Offline BG'])\n O2onBase = 1-(Prof['O2 Online'])/(O2_on+Prof['O2 Online BG'])\n O2offBase = 1-(Prof['O2 Offline'])/(O2_off+Prof['O2 Offline BG'])\n \n \n# HSRLbase = 2*(HSRLModel-Prof['HSRL'])/ProfVar['HSRL']\n# WVbase = 2*(WVModel-Prof['WV'])/ProfVar['WV']\n# O2base = 2*(O2Model-Prof['O2'])/ProfVar['O2']\n \n # temperature gradient\n grad2[1:,0] = np.nansum(HSRLmolBase[np.newaxis]*dHmdT,axis=1) \\\n + np.nansum(HSRLcombBase[np.newaxis]*dHcdT,axis=1) \\\n + np.nansum(WVonBase[np.newaxis]*dWVndT,axis=1) \\\n + np.nansum(WVoffBase[np.newaxis]*dWVfdT,axis=1) \\\n + np.nansum(O2onBase[np.newaxis]*dO2ndT,axis=1) \\\n + np.nansum(O2offBase[np.newaxis]*dO2fdT,axis=1)\n# # piece wise penalty function \n# gradpen = lam[0]*np.sign(np.diff(xS[:,0]))\n# gradpen[np.nonzero(np.isnan(gradpen))] = 0\n# grad2[2:,0] = grad2[2:,0] + gradpen\n# grad2[1:-1,0] = grad2[1:-1,0] - gradpen\n# piece wise slope penalty function \n gradpen = 
lam[0]*np.sign(np.diff(np.diff(xS[:,0])))\n gradpen[np.nonzero(np.isnan(gradpen))] = 0\n grad2[3:,0] = grad2[3:,0] + gradpen\n grad2[2:-1,0] = grad2[2:-1,0] - 2*gradpen\n grad2[1:-2,0] = grad2[1:-2,0] + gradpen\n \n # water vapor gradient\n grad2[1:,1] = np.nansum(WVonBase[np.newaxis]*dWVndnWV,axis=1) \\\n + np.nansum(WVoffBase[np.newaxis]*dWVfdnWV,axis=1) \\\n + np.nansum(O2onBase[np.newaxis]*dO2ndnWV,axis=1) \\\n + np.nansum(O2offBase[np.newaxis]*dO2fdnWV,axis=1)\n # piecewise penalty function\n gradpen = lam[1]*np.sign(np.diff(xS[:,1]))\n gradpen[np.nonzero(np.isnan(gradpen))] = 0\n grad2[2:,1] = grad2[2:,1] + gradpen\n grad2[1:-1,1] = grad2[1:-1,1] - gradpen\n \n # backscatter gradient\n grad2[1:,2] = np.nansum(HSRLmolBase[np.newaxis]*dHmdB,axis=1) \\\n + np.nansum(HSRLcombBase[np.newaxis]*dHcdB,axis=1) \\\n + np.nansum(WVonBase[np.newaxis]*dWVndB,axis=1) \\\n + np.nansum(WVoffBase[np.newaxis]*dWVfdB,axis=1) \\\n + np.nansum(O2onBase[np.newaxis]*dO2ndB,axis=1) \\\n + np.nansum(O2offBase[np.newaxis]*dO2fdB,axis=1) \n# #piecewise penalty function\n# gradpen = lam[2]*np.sign(np.diff(xS[:,2]))\n# gradpen[np.nonzero(np.isnan(gradpen))] = 0\n# grad2[2:,2] = grad2[2:,2] + gradpen\n# grad2[1:-1,2] = grad2[1:-1,2] - gradpen\n \n\n # *bsrMult['WV']\n # *bsrMult['WV']\n # *bsrMult['O2']\n # *bsrMult['O2']\n\n # HSRL Common terms\n grad2[1:,3] = np.nansum(HSRLmolBase[np.newaxis]*HSRL_mol,axis=0) + np.nansum(HSRLcombBase[np.newaxis]*HSRL_comb,axis=0)\n# # piece wise penalty function \n# gradpen = lam[3]*np.sign(np.diff(xS[:,3]))\n# gradpen[np.nonzero(np.isnan(gradpen))] = 0\n# grad2[2:,3] = grad2[2:,3] + gradpen\n# grad2[1:-1,3] = grad2[1:-1,3] - gradpen\n \n # WV Common terms\n grad2[1:,4] = np.nansum(WVonBase[np.newaxis]*WV_on,axis=0) + np.nansum(WVoffBase[np.newaxis]*WV_off,axis=0)\n# # piece wise penalty function \n# gradpen = lam[4]*np.sign(np.diff(xS[:,4]))\n# gradpen[np.nonzero(np.isnan(gradpen))] = 0\n# grad2[2:,4] = grad2[2:,4] + gradpen\n# grad2[1:-1,4] = grad2[1:-1,4] - gradpen\n \n # O2 Common terms\n grad2[1:,5] = np.nansum(O2onBase[np.newaxis]*O2_on,axis=0) + np.nansum(O2offBase[np.newaxis]*O2_off,axis=0)\n# # piece wise penalty function \n# gradpen = lam[5]*np.sign(np.diff(xS[:,5]))\n# gradpen[np.nonzero(np.isnan(gradpen))] = 0\n# grad2[2:,5] = grad2[2:,5] + gradpen\n# grad2[1:-1,5] = grad2[1:-1,5] - gradpen\n\n grad2[0,0] = np.nansum(HSRLmolBase*HSRL_mol/xK[0])\n grad2[0,1] = np.nansum(HSRLcombBase*HSRL_comb/xK[1])\n grad2[0,2] = np.nansum(WVonBase*WV_on/xK[2])\n grad2[0,3] = np.nansum(WVoffBase*WV_off/xK[3])\n grad2[0,4] = np.nansum(O2onBase*O2_on/xK[4])\n grad2[0,5] = np.nansum(O2offBase*O2_off/xK[5])\n \n# grad2[0,1] = np.nansum(WVbase*WVModel/xK[1])\n# grad2[0,2] = np.nansum(O2base*O2Model/xK[2])\n \n# OptError = np.nansum(2*(HSRLModel-Prof['HSRL'])/ProfVar['HSRL']*) \\\n# +np.nansum((WVModel-Prof['WV'])**2/ProfVar['WV']) \\\n# +np.sum((O2Model-Prof['O2'])**2/ProfVar['O2'])\n \n return grad2.flatten()", "def gradient(self):\n return ScalingOperator(self.domain, 2.0)", "def getGradient(self,j):\n i = int(self.indicator['term'][j])\n r = int(self.indicator['row'][j])\n c = int(self.indicator['col'][j])\n rv = -np.kron(self.Fstar()[i][:,[r]],self.Astar()[i][[c],:])\n return rv", "def grad(x):\n\n return x - np.arange(1, cfg.n + 1, dtype=np.float_)", "def gradient(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n grad = 4.0 * epsilon * ((-12.0/r) * s12 - (-6/r) * s6)\n grad = 0.5 * (r - 5.0)\n return grad", "def 
disc_step(real_data,fake_data):\n with tf.GradientTape() as tape:\n loss = discriminator_loss(real_data,fake_data)\n loss = tf.add_n([loss] + discriminator.losses)\n gradients = tape.gradient(loss, discriminator.trainable_variables)\n d_optimizer.apply_gradients(zip(gradients, discriminator.trainable_variables))\n return loss", "def grad_step2(self, x, u_prime):\n v = 0.25 * (x @ self.w)\n w = u_prime + v\n z = w.t() @ x\n if self.debug:\n print(f\"-> v={v}\")\n print(f\"-> w={w}\")\n print(f\"-> z={z}\\n\\n\")\n return w, z" ]
[ "0.69143575", "0.6433987", "0.5913618", "0.59055036", "0.5770813", "0.5768853", "0.5699996", "0.5663607", "0.5560883", "0.5526188", "0.5465842", "0.5421312", "0.5409416", "0.54049736", "0.5389346", "0.5362276", "0.53341675", "0.53341675", "0.533105", "0.5320766", "0.5319299", "0.53057826", "0.5305305", "0.52929205", "0.5289533", "0.5278122", "0.52685434", "0.5258744", "0.52479976", "0.52288055" ]
0.6492092
1
Return noisy data based on a polynomial function. The domain of the function is randomly generated, as well as the coefficients of the polynomial.
def get_data_poly_noise(start, stop, noise_rel=0.1, num=50, order=1): x = (stop - start) * np.random.random_sample(size=num) + start #coefficients for the polynomial in [-5,5] poly_coeff = 10 * np.random.random_sample(size=order+1) - 5 #create polynomial y = np.zeros(x.shape) for i in range(order+1): y += poly_coeff[i] * x**i noise_mag = noise_rel * np.abs((np.max(y) - np.min(y))) #add noise in [-noise_mag/2, noise_mag/2] y += noise_mag * np.random.random_sample(size=num) - noise_mag/2 return (x, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_polynomial():\n degree = numpy.random.choice(range(3, 7))\n x = numpy.linspace(-10, 10, 1000)\n coefficients = numpy.random.chisquare(3, size=degree) + 1\n coefficients *= numpy.random.choice([-1, 1], size=coefficients.shape)\n coefficients *= 0.5\n y = numpy.polyval(coefficients, x)\n add_noise(y, 0.1)\n return x, y", "def toyData(w,sigma,N): \n #Degree of polynomial \n degree=w.size; \n \n #generate x values \n x=np.linspace(0, 1,N);\n \n poly=preprocessing.PolynomialFeatures(degree-1,include_bias=True)\n \n PHI=poly.fit_transform(x.reshape(N,1)) \n \n y=np.dot(PHI,w);\n \n target=y+np.random.normal(0, sigma, N);\n \n Out=[x,y,PHI, target]\n\n return Out", "def noiseless_function(x):\n return 1/(1+np.exp(-x+5))-0.5", "def generate_data(freq, params, model):\n # Generate limit spectrum\n limit_spectrum = model(params)\n \n # Create data with noise from equation (A1) of Anderson (1990)\n data = -limit_spectrum * np.log(np.random.rand(len(freq)))\n return data, limit_spectrum", "def generate_data(response_fn, n=1, n_obs=100):\n\n x = np.array([\n np.linspace(-5, 5, n_obs)\n for _ in range(n)\n ])\n response = response_fn(x) + np.random.normal(0, 0.1, n_obs)\n data_matrix = np.vstack((x, np.ones(n_obs))).T\n return data_matrix, response", "def simdata(x):\n # calculate the theoretical output result\n y = Lorentz(x, x0true, Atrue, Btrue, dtrue)\n # add noise\n global noiselevel\n s = noiselevel\n\n if type(x) == np.ndarray:\n y += s * np.random.randn(len(x))\n else:\n y += s * np.random.randn()\n return y", "def nonlinear_function_dataset(n=100, show_plot=False):\n x = torch.rand(n, 1)*20 - 10 # Random values between [-10 and 10]\n y = (-1/100)*x**7 -x**4 -2*x**2 -4*x + 1 + 0.1*torch.randn(n, 1)\n if show_plot:\n show_TensorFunction1D(x, y, marker='.')\n return TensorDataset(x, y)", "def get_noisy_output_of_system(self, y_without_noise):\n # There were some problems with copying the array data so I just wrote a copy command for every single line\n if self.bOutputNoise:\n if np.size(y_without_noise, 0) == 3:\n y_with_noise = np.zeros(3)\n y_with_noise[0] = y_without_noise[0] + np.random.normal(0, np.sqrt(self.p_var), 1)[0]\n y_with_noise[1] = y_without_noise[1] + np.random.normal(0, np.sqrt(self.e_var), 1)[0]\n y_with_noise[2] = y_without_noise[2] + np.random.normal(0, np.sqrt(self.lamb_var), 1)[0]\n elif np.size(y_without_noise, 0) == 5:\n y_with_noise = np.zeros(5)\n y_with_noise[0] = y_without_noise[0] + np.random.normal(0, np.sqrt(self.p_var), 1)[0]\n y_with_noise[1] = y_without_noise[1] + np.random.normal(0, np.sqrt(self.e_var), 1)[0]\n y_with_noise[2] = y_without_noise[2] + np.random.normal(0, np.sqrt(self.lamb_var), 1)[0]\n y_with_noise[3] = y_without_noise[3] + np.random.normal(0, np.sqrt(self.f_var), 1)[0]\n y_with_noise[4] = y_without_noise[4] + np.random.normal(0, np.sqrt(self.b_var), 1)[0]\n else:\n y_with_noise = y_without_noise\n return y_with_noise", "def generate_overf_noise(amp, index, f0, dt, n):\n\n white_noise = rand.normal(size=n)\n power_spectrum = overf_power_spectrum(amp, index, f0, dt, n)\n # Power spectrum is in physical units of T**2/Hz. 
Put in discrete units by\n # multiplying by twice the bandwidth.\n power_spectrum *= 1.0/dt\n noise = fft.ifft(fft.fft(white_noise)*sp.sqrt(power_spectrum)).real\n return noise", "def make_noisy_images(image):\r\n return apply_poisson_noise(image, random_state=12345)", "def generate_polynesian_weather_data():\n weather_path = os.path.dirname(os.path.realpath(__file__))\n low_fp = weather_path + \"/polynesia_weather/low/1976/\"\n med_fp = weather_path + \"/polynesia_weather/med/1985/\"\n high_fp = weather_path + \"/polynesia_weather/high/1982/\"\n low_name = \"polynesia_1976\"\n med_name = \"polynesia_1985\"\n high_name = \"polynesia_1982\"\n generate_year_weather_data(low_fp, low_name)\n generate_year_weather_data(med_fp, med_name)\n generate_year_weather_data(high_fp, high_name)", "def f(self, x, coeffs, jitter = 0):\n return np.polyval(np.flip(coeffs), x) + random.uniform(-jitter,jitter)", "def data_fun(times, n_dipoles=4):\n n = 0 # harmonic number\n n_samp = len(times)\n window = np.zeros(n_samp)\n start, stop = [int(ii * float(n_samp) / (2 * n_dipoles)) for ii in (2 * n, 2 * n + 1)]\n window[start:stop] = 1.0\n n += 1\n data = 25e-9 * np.sin(2.0 * np.pi * 10.0 * n * times)\n data *= window\n return data", "def addNoise(data, amp, scale):\n lfnData = addLFNoise(data, amp, scale)\n noisyData = addHFNoise(hfnData, amp)\n\n return noisyData", "def get_noisy_dynamics(self, pulses, dims):\n return pulses", "def noise_data(self, x):\n return x + np.random.normal(size=x.shape)", "def noise(self, freq: int, /) -> None:", "def __create_sample_data__(npts = 20):\n\t#data function\n\tdef wavy(x, y):\n\t\treturn np.sin(0.2*np.pi*x)*np.cos(0.4*np.pi*y)\n\t\n\t#make grid\n\txs = np.linspace(0, 2*20, 2*npts + 1)\n\tys = np.linspace(0, 20, npts + 1)\n\t(xgrid, ygrid) = np.meshgrid(xs, ys)\n\tzgrid = wavy(xgrid, ygrid)\n\t\n\treturn (xgrid, ygrid, zgrid)", "def _get_noise(self, shape, dtype=None):", "def polyFeat(X, p):\r\n # You need to return the following variables correctly.\r\n X_poly = np.zeros((X.shape[0], p))\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in range(p):\r\n X_poly[:, i] = X[:, 0] ** (i + 1)\r\n\r\n # ============================================================\r\n return X_poly", "def get_Pn(f, L, S_lp, S_ac): \r\n # single-link optical metrology noise (Hz^{-1}), Equation (10)\r\n P_oms = S_lp**2 \r\n # single test mass acceleration noise, Equation (11)\r\n P_acc = S_ac**2*(1. + 0.1e-3/f) \r\n # total noise in Michelson-style LISA data channel, Equation (12)\r\n Pn = (P_oms + 4.*P_acc/(2.*pi*f)**4.)/L**2. 
\r\n return Pn", "def simplestRandom(n):\n # do something \"perlin noise like\" - with various frequency scales\n level1 = numpy.random.randint(0,4,size=4)\n level2 = numpy.random.randn(10)\n level3 = numpy.random.randn(50) * .5\n # make splines for each\n u1 = INTERP.UnivariateSpline(numpy.linspace(0,1,4) ,level1,s=0)\n u2 = INTERP.UnivariateSpline(numpy.linspace(0,1,10),level2,s=0)\n u3 = INTERP.UnivariateSpline(numpy.linspace(0,1,50),level3,s=0)\n # build the signal on the range 0..1 - then use linspace to sample it\n samples = numpy.linspace(0,1,n)\n return numpy.array([u1(u)+u2(u)+u3(u) for u in samples])", "def generate_data(Amp, freq, noise_amp):\n x = np.arange(0.,80.,0.01)\n y = Amp * np.sin(freq * x)\n y += noise_amp * np.random.randn(len(y))\n return x, y", "def get_noisy_dynamics(self, pulses):\n if self.indices is None:\n indices = range(len(pulses))\n else:\n indices = self.indices\n t_max = -np.inf\n t_min = np.inf\n for pulse in pulses:\n t_max = max(max(pulse.tlist), t_max)\n t_min = min(min(pulse.tlist), t_min)\n # create new tlist and random coeff\n num_rand = int(np.floor((t_max - t_min) / self.dt)) + 1\n tlist = (np.arange(0, self.dt*num_rand, self.dt)[:num_rand] + t_min)\n # [:num_rand] for round of error like 0.2*6=1.2000000000002\n\n for i in indices:\n pulse = pulses[i]\n coeff = self.rand_gen(**self.kwargs, size=num_rand)\n pulses[i].add_coherent_noise(\n pulse.qobj, pulse.targets, tlist, coeff)\n return pulses", "def noise_pipeline(images, funcs, DIFFICULTY):\n if DIFFICULTY == 0:\n return images\n else:\n for func in funcs:\n images = func(images, DIFFICULTY)\n return images", "def prob6(n):\n data = np.load('airdata.npy')\n fx = lambda a, b, n: .5*(a+b + (b-a) * np.cos(np.arange(n+1) * np.pi/n))\n a, b = 0, 366 - 1/24\n domain = np.linspace(0, b, 8784)\n pts = fx(a, b, n)\n temp = np.abs(pts - domain.reshape(8784, 1))\n temp2 = np.argmin(temp, axis=0)\n poly = Barycentric(domain[temp2], data[temp2])\n\n plt.ion()\n plt.subplot(121)\n plt.plot(domain, data)\n plt.title(\"Data\")\n plt.subplot(122)\n plt.plot(domain, poly(domain))\n plt.title(\"Interpolation\")\n plt.show()", "def fit_poly(x, y, n=5, log=False):\n \n x_g = x\n x = np.ma.array(x, mask=y.mask).compressed()\n y = y.compressed()\n if log:\n yl = np.log10(y)\n else:\n yl = y\n fit = np.polyfit(x, yl, n)\n p = np.poly1d(fit)\n \n if log:\n return 10**(p(x_g))\n else:\n return p(x_g)", "def base_polynome(numbers):\n\n monomes = [ x**n for n in numbers ]\n polynome = sum(monomes)\n\n return poly(polynome, x)", "def _get_noise(self, shape, dtype=None):\n return np.random.uniform(self._low, self._high, shape).astype(dtype)", "def gen_data(low, high, n_samples, scale=4, test_size=0.2, random_state=3):\n np.random.seed(15)\n X = np.random.uniform(low, high, size=n_samples)\n\n # generate the response from the ground truth function and add\n # some random noise to it, scale controls the variance of the noise.\n y = ground_truth(X) + np.random.normal(scale=scale, size=n_samples)\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=test_size, random_state=random_state)\n\n return X_train, X_test, y_train, y_test" ]
[ "0.6534441", "0.588378", "0.5868336", "0.5808874", "0.5539587", "0.5520163", "0.5374264", "0.5327649", "0.5274025", "0.5241983", "0.52354085", "0.5199385", "0.51953924", "0.51682925", "0.51638764", "0.5158305", "0.5152258", "0.5152128", "0.5151019", "0.51287943", "0.5120953", "0.51018506", "0.5082345", "0.50820315", "0.5073139", "0.5061136", "0.5055374", "0.5053903", "0.5032798", "0.5015715" ]
0.60185647
1
r"""Select ``n`` problematic dataset indices based on weighted cluster losses and distribution.
def select_prob_indices(self, n_select, cl_idxs, idx_loss_cl): log.info("\nSelecting problematic structures") cl_losses = np.array([np.mean(losses) for losses in idx_loss_cl]) cl_pop = np.array([len(_) for _ in cl_idxs]) # Cluster population log.info("Computing cluster loss weights") cl_weights = (cl_losses / np.sum(cl_losses)) * (cl_pop / np.sum(cl_pop)) cl_weights_norm = np.array(cl_weights) / np.sum(cl_weights) # pylint: disable-next=invalid-name Ns = self.n_cl_samples(n_select, cl_weights_norm, cl_pop, cl_losses) log.info("Sampling structures") n_cl = len(cl_losses) prob_idxs = [] for i in range(n_cl): losses = idx_loss_cl[i] idxs = cl_idxs[i] ni = int(Ns[i]) # pylint: disable=invalid-name argmax = np.argsort(-losses)[:ni] prob_idxs.extend(idxs[argmax]) prob_idxs = np.array(prob_idxs) log.debug("Selected dataset indices:") log.log_array(prob_idxs, level=10) return prob_idxs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cluster_indices(self,dataset, cluster_number):\n\t\tself.__init__(dataset, self.k)\n\t\tself.e_step() #got responsibilities\n\t\tmax_cluster = np.argmax(self.w, axis = 1)\n\t\tindices = []\n\t\tfor i in range(dataset.shape[0]):\n\t\t\tif max_cluster[i] == cluster_number:\n\t\t\t\tindices.append(i)\n\t\treturn indices", "def omission_index(n, sample_size):\n \n \"randomly pick some subset of sample_size agents\"\n index = np.sort(np.random.choice(n,sample_size,replace=False))\n \"double up index to choose x and y positions columns. both are used.\"\n index2 = np.repeat(2*index,2) \n \"nudge every second item to take the ith+1 column (y coordinate corresponding to chosen x)\"\n index2[1::2] += 1\n return index, index2", "def getBatch(self, n, rng, dataset):\n pmax = self._root.priority\n step = pmax / n\n indices = np.zeros(n, dtype='int32')\n for i in range(n):\n p = rng.uniform(i*step, (i+1)*step)\n node = self.find(p)\n index = self._checkTerminal(node.position, dataset)\n if (index >= 0):\n indices[i] = index\n else:\n return np.zeros(0)\n\n return indices", "def select(individuals, n):\r\n # return selBest(individuals, n)\r\n return individuals[:n]", "def select_n_random(data, labels, n=100):\n assert len(data) == len(labels)\n\n # TODO: sort this out for 3D data\n # p1 = torch.randperm(len(data))\n # sample_labels = labels[p1][:n]\n # sample_data = data[p1][:n]\n return data[:n], labels[:n]", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n indexes = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexes", "def random_infected_nodes(self, n_nodes):\n\n # Randomly get n_nodes number of nodes\n random_sample = self.graph.vertices.rdd.takeSample(False, n_nodes)\n randomly_infected_nodes = self.sqlContext.createDataFrame(random_sample)\n\n # Set infected nodes\n self.set_infected_nodes(randomly_infected_nodes)", "def _select_n(arr, n):\n selection = []\n\n idx = range(0, len(arr))\n for x in range(n):\n if len(idx) == 0:\n break\n i = randint(0, len(idx) - 1)\n selection.append(arr[idx[i]])\n del idx[i]\n\n return selection", "def get_indexes(self, dataset):\n\n indexs = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexs", "def get_random_inhibitory_weights(self):\n \n self.W_ei=np.zeros((self.N_e,self.N_i))\n self.W_ie=np.zeros((self.N_i,self.N_e)) \n self. 
W_ii=np.zeros((self.N_i,self.N_i))\n\n \n # connections to the excitatory neurons \n for row_idx in xrange(self.N_e):\n \n # from ihibitory\n all_idxs_ei=np.arange(self.N_i)\n np.random.shuffle(all_idxs_ei)\n self.W_ei[row_idx,all_idxs_ei[0:self.num_conns_ei]]=self.W_max_ei \n \n # connections to inhibitory neurons\n for row_idx in range(self.N_i):\n \n # from exitatory \n all_idxs_ie=np.arange(self.N_e)\n np.random.shuffle(all_idxs_ie)\n self.W_ie[row_idx,all_idxs_ie[0:self.num_conns_ie]]=self.W_max_ie\n \n # from inhibitory\n all_idxs_ii=np.arange(self.N_i)\n np.random.shuffle(all_idxs_ii)\n self.W_ii[row_idx,all_idxs_ii[0:self.num_conns_ii]]=self.W_max_ii\n \n \n self.W[:self.N_e,self.N_e:]=self.W_ei\n self.W[self.N_e:,:self.N_e]=self.W_ie\n self.W[self.N_e:,self.N_e:]=self.W_ii", "def nsel(self, n: int) -> Status:\n# warn('This is deprecated, as pymapdl now provides similar functionality.', DeprecationWarning)#, stacklevel=2)\n result = self._read_inline(f\"nsel({n})\")\n return Status(result)", "def get_random_indexes(msk, n_idxs):\n pos_idxs = np.array(np.where(msk > 0))\n neg_idxs = np.array(np.where(msk == 0))\n n_pos = pos_idxs.shape[1] # number of positives found in the mask\n n_neg = neg_idxs.shape[1] # number of negatives found in the mask\n n_min = min(n_neg, min(n_idxs, n_pos))\n rnd_idxs_pos = range(n_pos)\n np.random.shuffle(rnd_idxs_pos)\n rnd_idxs_neg = range(n_neg)\n np.random.shuffle(rnd_idxs_neg)\n return pos_idxs[:, rnd_idxs_pos[:n_min]], neg_idxs[:, rnd_idxs_neg[:n_min]]", "def _generate_sample_indexes(random_state, n_samples, n_samples_bootstrap):\n # Obtain the random state\n random_state = check_random_state(random_state)\n\n # Obtain the indexes for the samples taking\n # into account the total number of samples\n # and the number of samples to be taken\n sample_indexes = random_state.randint(0, n_samples, n_samples_bootstrap)\n\n # Return them\n return sample_indexes", "def get_cluster_indices(dataset, cluster_number, GMM_model):\n\tGMM_model.data = dataset.copy()\n\tGMM_model.m, GMM_model.n = dataset.shape\n\tGMM_model.w = np.asmatrix(np.empty((GMM_model.m, GMM_model.k), dtype=float))\n\tGMM_model.e_step() #got responsibilities\n\tmax_cluster = np.argmax(GMM_model.w, axis = 1)\n\tindices = []\n\tfor i in range(dataset.shape[0]):\n\t\tif max_cluster[i] == cluster_number:\n\t\t\tindices.append(i)\n\treturn indices", "def get_indexes(self, dataset):\n\n for i in range(self.max_iters):\n index = random.randint(0, len(dataset))\n gt_bboxes_i = dataset.get_ann_info(index)['bboxes']\n if len(gt_bboxes_i) != 0:\n break\n\n return index", "def create_dataset_splits(n, p=1.0):\n\tperm = np.random.permutation(n).tolist()\n\tidx = int(p * n)\n\treturn perm[:idx]", "def tril_indices(n,k=0):\r\n return mask_indices(n,tril,k)", "def random_partition(n, n_data):\n all_idxs = np.arange(n_data)\n np.random.shuffle(all_idxs)\n idxs1 = all_idxs[:n]\n idxs2 = all_idxs[n:]\n return idxs1, idxs2", "def find_indices_srnn(data, action):\n\n SEED = 1234567890\n rng = np.random.RandomState(SEED)\n\n subject = 5\n subaction1 = 1\n subaction2 = 2\n\n T1 = data[(subject, action, subaction1)].shape[0]\n T2 = data[(subject, action, subaction2)].shape[0]\n prefix, suffix = 50, 100\n\n idx = []\n idx.append(rng.randint(16,T1-prefix-suffix))\n idx.append(rng.randint(16,T2-prefix-suffix))\n idx.append(rng.randint(16,T1-prefix-suffix))\n idx.append(rng.randint(16,T2-prefix-suffix))\n idx.append(rng.randint(16,T1-prefix-suffix))\n idx.append(rng.randint(16,T2-prefix-suffix))\n 
idx.append(rng.randint(16,T1-prefix-suffix))\n idx.append(rng.randint(16,T2-prefix-suffix))\n\n return idx", "def sample(\n self, n=None, weight_by_clusters=False, random_state=None, **kwargs\n ):\n\n if not isinstance(n, int):\n n = 1\n\n if isinstance(random_state, int):\n np.random.seed(random_state)\n\n # Sort by base 4 id\n if weight_by_clusters:\n df = self.weight_grids(**kwargs)\n else:\n df = self.to_frame()\n\n df = df.sort_values(by='id')\n\n npool = df.shape[0]\n interval = int(np.ceil(npool / n))\n\n # Get a random starting index\n start = np.random.randint(0, high=interval, size=1, dtype=int)[0]\n\n # Get the sample indices\n sample_indices = np.arange(start, npool, interval)\n\n # Get the random grids\n df_sample = df.iloc[sample_indices]\n\n sample_indices = []\n\n # Iterate over the selected grids,\n # get intersecting samples, and\n # select 1 sample within each grid.\n for row in df_sample.itertuples():\n\n # The grid bounds\n bbox = row.geometry.bounds\n\n # Points that intersect the current grid\n point_int = list(self.sindex.intersection(bbox))\n\n # Get one random point within the grid\n sample_indices.append(\n np.random.choice(point_int, size=1, replace=False)[0]\n )\n\n # Get the random points\n return self.dataframe.iloc[sample_indices]", "def spark_index(n):\n return int(round((clamp(n) - minimum) * coefficient))", "def val_train_idxs(n, val_pct=0.2, seed=42):\n#def get_cv_idxs(n, cv_idx=0, val_pct=0.2, seed=42):\n np.random.seed(seed)\n n_val = int(val_pct*n)\n #idx_start = cv_idx*n_val\n idxs = np.random.permutation(n)\n # np.random.permutation has two differences from np.random.shuffle:\n # if passed an array, it will return a shuffled copy of the array; np.random.shuffle shuffles the array inplace\n # if passed an integer, it will return a shuffled range i.e. 
np.random.shuffle(np.arange(n))\n #return idxs[idx_start:idx_start+n_val], idxs[idx_start+n_val,:]\n val = idxs[:n_val]\n trn = idxs[n_val:]\n return val, trn", "def train_test_indices(n, train_ratio=0.7):\n train_split_index = int(train_ratio * n)\n shuffled_indices = np.random.permutation(n)\n train_indices = shuffled_indices[:train_split_index]\n test_indices = shuffled_indices[train_split_index:]\n return train_indices, test_indices", "def mask_indices(n,mask_func,k=0):\r\n m = ones((n,n),int)\r\n a = mask_func(m,k)\r\n return where(a != 0)", "def discover_uncertain(\n self,\n n: int,\n items: List[str],\n embeddings: np.ndarray,\n weights: Optional[List[float]] = None,\n ) -> List[Tuple[float, Tuple[str, str]]]:\n assert self._centroids != {}\n \n # Generate weights if not provided, only consider un-clustered items\n weights = weights if weights else [1, ] * len(items)\n assert len(weights) == len(items)\n \n # Don't consider elements that are already validated\n known_items = set(self._clusters.keys())\n weights = [0 if items[i] in known_items else w for i, w in enumerate(weights)]\n \n # Calculate the similarities to all cluster-centroids\n cluster_ids, cluster_embs = zip(*self._centroids.items())\n cluster_embs = np.vstack(cluster_embs)\n \n # Calculate similarity with cluster centroids and sort\n similarity = cosine_similarity(embeddings, cluster_embs)\n sorted_idx = similarity.argsort(axis=1)\n \n # For each item, check if close to multiple clusters and get the certainty to its closest cluster-centroid\n item_similarities = []\n for i, (indices, w) in enumerate(zip(sorted_idx, weights)):\n second_best, best = indices[-2:]\n item_similarities.append((\n w * similarity[i, best] if similarity[i, second_best] >= self._sim_thr else 0,\n (items[i], cluster_ids[best])\n ))\n \n # Filter out those with a score greater than zero\n options = [(a, b) for a, b in item_similarities if a > 0]\n \n # Return all options if number of options less than desired sample-amount\n if len(options) <= n:\n return options\n \n # Sample options based on score\n weights = [a for a, _ in options]\n chosen_indices = np.random.choice(\n range(len(options)),\n size=n,\n replace=False,\n p=np.asarray(weights, dtype='float32') / sum(weights),\n )\n return [options[idx] for idx in chosen_indices]", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n index = [np.random.randint(0, len(dataset)) for _ in range(1)]\n\n return index", "def _select(start, n, label) -> int:\n n_selected = 0\n for i in range(start, int(start + n)):\n x = self._x_positions[i]\n n_selected += self._cols[x].mark_as(label)\n return n_selected", "def triu_indices(n,k=0):\r\n return mask_indices(n,triu,k)", "def n_cl_samples(self, n_sample, cl_weights, cl_pop, cl_losses):\n samples = np.array(cl_weights * n_sample)\n samples = np.floor(samples)\n\n # Check that selections do not sample more than the population\n for i, pop in enumerate(cl_pop):\n if samples[i] > pop:\n samples[i] = pop\n\n # Try to have at least one sample from each cluster\n # (in order of max loss)\n arg_max = (-cl_losses).argsort()\n for i in arg_max:\n if np.sum(samples) == n_sample:\n return samples\n if samples[i] == 0:\n samples[i] = 1\n\n # If there are still not enough samples, we start adding additional\n # samples in order of highest cluster losses.\n for i in arg_max:\n if np.sum(samples) == n_sample:\n return samples\n if samples[i] < cl_pop[i]:\n samples[i] += 1\n\n return samples.astype(int)", "def prob_cl_indices(self, cl_idxs, cl_losses):\n 
log.info(\"Finding problematic structures\")\n loss_bound = np.mean(cl_losses) # Initial minimum loss\n loss_step = loss_bound / 500\n loss_bound += loss_step\n idxs = []\n while len(idxs) < 1.5 * self.refine_n_cl:\n log.info(\"Minimum cluster loss : %.4f\", loss_bound)\n cl_idxs_prob = np.concatenate(np.argwhere(cl_losses >= loss_bound))\n clusters = np.array(cl_idxs, dtype=object)[cl_idxs_prob]\n idxs = np.concatenate(clusters)\n loss_bound -= loss_step\n log.info(\"N structures included : %d\\n\", len(idxs))\n return idxs" ]
[ "0.6193597", "0.5927715", "0.58556", "0.56422186", "0.5625437", "0.5570782", "0.55194324", "0.54963136", "0.54559964", "0.54301596", "0.54224414", "0.5396253", "0.5388968", "0.5360405", "0.5339347", "0.5339048", "0.5330715", "0.5312012", "0.52913034", "0.528683", "0.52831435", "0.5222134", "0.5217779", "0.5215708", "0.51939505", "0.51923424", "0.51791036", "0.5176516", "0.5176025", "0.51559556" ]
0.6985315
0
r"""Find problematic structures in a dataset. Uses agglomerative and kmeans clustering on a dataset. First, the dataset is split into ``10`` clusters based on atomic pairwise distances. Then each cluster is further split into ``5`` clusters based on energies. Energies and forces are predicted, and then problematic structures are taken from clusters with higherthanaverage losses. Here, the force MSE is used as the loss function. Finally, ``n_find`` structures are sampled from the 100 clusters based on a weighted cluster error distribution.
def find(
    self,
    dset,
    n_find,
    dset_is_train=True,
    train_idxs=None,
    write_json=True,
    save_cl_plot=True,
    image_format="png",
    image_dpi=600,
    save_dir=".",
):
    log.info(
        "---------------------------\n"
        "| Finding Problematic |\n"
        "| Structures |\n"
        "---------------------------\n"
    )
    if write_json:
        self.json_dict = {}

    log.info("Loading dataset\n")
    Z, R, E, F = dset.Z, dset.R, dset.E, dset.F
    entity_ids, comp_ids = dset.entity_ids, dset.comp_ids

    # Removing training indices.
    R_idxs_orig = np.array(list(range(len(R))))  # pylint: disable=invalid-name
    if dset_is_train:
        log.info("Dropping indices already in training set")
        if len(self.models) != 1:
            log.warning("Cannot drop training indices if there are multiple models")
            log.warning("Not dropping any indices")
        assert len(self.models) == 1
        if train_idxs is None:
            try:
                train_idxs = self.models[0].model_dict["idxs_train"]
            except Exception as e:
                raise AttributeError("Training indices were not provided") from e
        else:
            assert isinstance(train_idxs, np.ndarray)
        log.debug("Training indices")
        log.log_array(train_idxs, level=10)

        n_Ri = len(R_idxs_orig)  # pylint: disable=invalid-name
        log.info("There are a total of %d structures", n_Ri)
        R_idxs = np.setdiff1d(R_idxs_orig, train_idxs)
        n_Rf = len(R_idxs)  # pylint: disable=invalid-name
        log.info("Removed %d structures", n_Ri - n_Rf)
    else:
        R_idxs = R_idxs_orig

    # Note: Indices from this point on do not directly map to their index
    # in the dataset. We have to convert back to their original indices
    # when necessary. We refer to R_idxs as no-training indices.

    # Perform clustering based on pairwise distances and energies
    R, E, F = R[R_idxs], E[R_idxs], F[R_idxs]
    R_pd = self.get_pd(R)
    cl_data = (R_pd, E.reshape(-1, 1))
    cl_algos = (clustering.agglomerative, clustering.kmeans)
    cl_kwargs = ({"n_clusters": 10}, {"n_clusters": 5})
    cl_idxs = clustering.cluster_structures(cl_data, cl_algos, cl_kwargs)
    cl_pop = [len(i) for i in cl_idxs]

    if write_json:
        # Convert back to dataset indices just to write.
        # The no-train indices is still needed to compute errors and
        # problematic clustering.
        cl_idxs_write = [np.array(R_idxs[idxs]) for idxs in cl_idxs]
        self.json_dict["clustering"] = {}
        self.json_dict["clustering"]["indices"] = cl_idxs_write
        self.json_dict["clustering"]["population"] = cl_pop

    log.info("\nPredicting structures")
    t_prediction = log.t_start()
    # pylint: disable-next=unbalanced-tuple-unpacking
    E_pred, F_pred = self.mbe_pred.predict(Z, R, entity_ids, comp_ids)
    log.t_stop(t_prediction, message="Took {time} s")

    log.info("Computing prediction errors")
    E_errors = E_pred - E
    F_errors = F_pred - F
    log.debug("Energy errors")
    log.log_array(E_errors, level=10)
    log.debug("Force errors")
    log.log_array(F_errors, level=10)

    log.info("\nAggregating errors")
    # pylint: disable-next=invalid-name
    E_errors_cl = clustering.get_clustered_data(cl_idxs, E_errors)
    # pylint: disable-next=invalid-name
    F_errors_cl = clustering.get_clustered_data(cl_idxs, F_errors)

    # Computing cluster losses
    loss_kwargs = {"energy": E_errors_cl, "force": F_errors_cl}
    cl_losses = clustering.get_cluster_losses(self.loss_func, loss_kwargs)

    if write_json:
        self.json_dict["clustering"]["loss_function"] = self.loss_func.__name__
        self.json_dict["clustering"]["losses"] = cl_losses

    prob_indices = self.prob_cl_indices(cl_idxs, cl_losses)

    # Problematic clustering
    log.info("Refine clustering of problematic structures")
    # Switching to problematic idxs for clustering.
    R_pd_prob = R_pd[prob_indices]  # pylint: disable=invalid-name
    cl_data_prob = (R_pd_prob,)
    cl_algos_prob = (clustering.agglomerative,)
    cl_kwargs_prob = ({"n_clusters": self.refine_n_cl},)
    cl_idxs_prob = clustering.cluster_structures(
        cl_data_prob, cl_algos_prob, cl_kwargs_prob
    )
    # switching back to no-training idxs
    cl_idxs_prob = [np.array(prob_indices[idxs]) for idxs in cl_idxs_prob]
    cl_pop_prob = [len(i) for i in cl_idxs_prob]

    if write_json:
        # Convert back to dataset indices just to write.
        cl_idxs_prob_write = [np.array(R_idxs[idxs]) for idxs in cl_idxs_prob]
        self.json_dict["problematic_clustering"] = {}
        self.json_dict["problematic_clustering"]["indices"] = cl_idxs_prob_write
        self.json_dict["problematic_clustering"]["population"] = cl_pop_prob

    log.info("Aggregating errors for problematic structures")
    # pylint: disable-next=invalid-name
    # E_errors_cluster_prob = clustering.get_clustered_data(cl_idxs_prob, E_errors)
    # pylint: disable-next=invalid-name
    # F_errors_cluster_prob = clustering.get_clustered_data(cl_idxs_prob, F_errors)
    # idx_loss_kwargs = {"energy": E_errors, "force": F_errors}

    structure_loss = np.empty(E_errors.shape)
    for i in range(len(structure_loss)):  # pylint: disable=consider-using-enumerate
        structure_loss[i] = self.loss_func(
            {"energy": E_errors[i], "force": F_errors[i]}
        )
    structure_loss_cl = clustering.get_clustered_data(cl_idxs_prob, structure_loss)

    if write_json:
        self.json_dict["problematic_clustering"][
            "loss_function"
        ] = self.loss_func.__name__
        self.json_dict["problematic_clustering"]["losses"] = structure_loss_cl

    next_idxs = self.select_prob_indices(n_find, cl_idxs_prob, structure_loss_cl)
    # Convert back to dataset indices.
    next_idxs = R_idxs[next_idxs]
    if write_json:
        self.json_dict["add_training_indices"] = next_idxs
        save_json(
            os.path.join(save_dir, "find_problematic_indices.json"), self.json_dict
        )
    if save_cl_plot:
        fig = self.plot_cl_losses(cl_pop, cl_losses)
        fig.savefig(
            os.path.join(save_dir, f"cl_losses.{image_format}"), dpi=image_dpi
        )
    return next_idxs
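    # A minimal, hypothetical usage sketch of the `find` method above. The object
    # name `learner` (an active-learning helper exposing this method) and every
    # keyword value shown are illustrative assumptions, not taken from the source.
    #
    # next_idxs = learner.find(
    #     dset,                # dataset exposing Z, R, E, F, entity_ids, comp_ids
    #     n_find=100,          # number of problematic structures to sample
    #     dset_is_train=True,  # drop indices already used to train the model
    #     write_json=True,     # writes find_problematic_indices.json to save_dir
    #     save_cl_plot=True,   # saves cl_losses.<image_format> with cluster losses
    #     save_dir=".",
    # )
    # The returned `next_idxs` index into `dset` and can be added to the training set.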
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return", "def fit(self, ds: loompy.LoomConnection, labels: np.ndarray = None) -> np.ndarray:\n\t\tn_genes, n_cells = ds.shape\n\n\t\t# Find the cluster labels\n\t\tif labels is None:\n\t\t\tlabels = ds.ca[self.labels_attr]\n\t\tn_labels = len(np.unique(labels))\n\t\tlogging.info(f\"Multilevel marker selection with {n_labels} clusters at the leaf level\")\n\n\t\t# Find a good set of levels\n\t\tif self.n_clusters_per_level is None:\n\t\t\tproposal = np.array([25, 10, 5, 2])\n\t\t\tproposal = proposal[proposal < n_labels // 2]\n\t\t\tself.n_clusters_per_level = list(proposal)\n\t\tn_levels = len(self.n_clusters_per_level)\n\t\tif n_levels > 0:\n\t\t\tlogging.info(f\"Analyzing {n_levels} higher level{'s' if n_levels > 1 else ''} with {self.n_clusters_per_level} clusters\")\n\t\t\tmultilevel_markers = np.zeros((ds.shape[0], n_levels))\n\n\t\t\t# Find markers at the leaf level\n\t\t\t(all_markers, all_enrichment, means) = self._fit(ds, labels)\n\t\t\tlogging.info(f\"Found {all_markers.sum()} marker genes at level 0 (leaves)\")\n\n\t\t\t# Agglomerative clustering\n\t\t\tdata = np.log(means + 1)[all_markers, :].T\n\t\t\tD = pdist(data, 'correlation')\n\t\t\tZ = hc.linkage(D, 'complete', optimal_ordering=True)\n\t\t\told_labels_per_cluster = hc.leaves_list(Z)\n\t\t\told_labels_per_cell = labels.copy()\n\n\t\t\t# Select markers at each level\n\t\t\ti = 0\n\t\t\twhile i < n_levels:\n\t\t\t\tnew_labels_per_cluster = hc.cut_tree(Z, n_clusters=self.n_clusters_per_level[i])\n\t\t\t\ttemp = np.zeros_like(labels)\n\t\t\t\tfor lbl in np.unique(old_labels_per_cluster):\n\t\t\t\t\ttemp[old_labels_per_cell == lbl] = new_labels_per_cluster[old_labels_per_cluster == lbl][0]\n\t\t\t\tlabels = temp\n\t\t\t\t(markers, enrichment, _) = self._fit(ds, labels)\n\t\t\t\tlogging.info(f\"Found {markers.sum()} marker genes at level {i + 1}\")\n\t\t\t\tlogging.debug(ds.ra.Gene[markers])\n\t\t\t\tmultilevel_markers[markers, i] = 1\n\t\t\t\tall_markers = 
(all_markers | markers)\n\t\t\t\ti += 1\n\t\t\tds.ra.MultilevelMarkers = multilevel_markers\n\t\telse:\n\t\t\tlogging.info(\"Not enough clusters for multilevel marker selection (using level 0 markers only)\")\n\t\t\t# Find markers at the leaf level\n\t\t\t(all_markers, all_enrichment, means) = self._fit(ds, labels)\n\t\t\tlogging.info(f\"Found {all_markers.sum()} marker genes at level 0 (leaves)\")\n\n\t\tself.enrichment = all_enrichment\n\t\tselected = np.zeros(ds.shape[0], dtype=bool)\n\t\tselected[np.where(all_markers)[0]] = True\n\t\treturn selected", "def test_determine_k(self):\n test_dir_name = os.path.dirname(__file__)\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"four_clusters.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\"x\", \"y\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 4)\n\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"iris.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\n \"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\",\n \"Petal.Width\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 2)", "def fetch_group_lasso_datasets():\n\n # helper functions\n\n def find_interaction_index(seq, subseq,\n alphabet=\"ATGC\",\n all_possible_len_n_interactions=None):\n n = len(subseq)\n alphabet_interactions = \\\n [set(p) for\n p in list(itertools.combinations_with_replacement(alphabet, n))]\n\n num_interactions = len(alphabet_interactions)\n if all_possible_len_n_interactions is None:\n all_possible_len_n_interactions = \\\n [set(interaction) for\n interaction in\n list(itertools.combinations_with_replacement(seq, n))]\n\n subseq = set(subseq)\n\n group_index = num_interactions * \\\n all_possible_len_n_interactions.index(subseq)\n value_index = alphabet_interactions.index(subseq)\n\n final_index = group_index + value_index\n return final_index\n\n def create_group_indicies_list(seqlength=7,\n alphabet=\"ATGC\",\n interactions=[1, 2, 3],\n include_extra=True):\n alphabet_length = len(alphabet)\n index_groups = []\n if include_extra:\n index_groups.append(0)\n group_count = 1\n for inter in interactions:\n n_interactions = comb(seqlength, inter)\n n_alphabet_combos = comb(alphabet_length,\n inter,\n repetition=True)\n\n for x1 in range(int(n_interactions)):\n for x2 in range(int(n_alphabet_combos)):\n index_groups.append(int(group_count))\n\n group_count += 1\n return index_groups\n\n def create_feature_vector_for_sequence(seq,\n alphabet=\"ATGC\",\n interactions=[1, 2, 3]):\n feature_vector_length = \\\n sum([comb(len(seq), inter) *\n comb(len(alphabet), inter, repetition=True)\n for inter in interactions]) + 1\n\n feature_vector = np.zeros(int(feature_vector_length))\n feature_vector[0] = 1.0\n for inter in interactions:\n # interactions at the current level\n cur_interactions = \\\n [set(p) for p in list(itertools.combinations(seq, inter))]\n interaction_idxs = \\\n [find_interaction_index(\n seq, cur_inter,\n all_possible_len_n_interactions=cur_interactions) + 1\n for cur_inter in cur_interactions]\n feature_vector[interaction_idxs] = 1.0\n\n return feature_vector\n\n positive_url = \\\n \"http://genes.mit.edu/burgelab/maxent/ssdata/MEMset/train5_hs\"\n negative_url = \\\n \"http://genes.mit.edu/burgelab/maxent/ssdata/MEMset/train0_5_hs\"\n\n pos_file = tempfile.NamedTemporaryFile() #bufsize=0)\n neg_file = 
tempfile.NamedTemporaryFile() #bufsize=0)\n\n posreq = urllib.request.Request(positive_url)\n with urllib.request.urlopen(posreq) as posresponse:\n pos_page = posresponse.read().decode(\"utf-8\")\n\n negreq = urllib.request.Request(negative_url)\n with urllib.request.urlopen(negreq) as negresponse:\n neg_page = negresponse.read().decode(\"utf-8\")\n\n positive_sequences = [str(line.strip().upper()) for idx, line in\n enumerate(pos_page.strip().split('\\n'))\n if \">\" not in line and idx < 2 * 8000]\n\n negative_sequences = [str(line.strip().upper()) for idx, line in\n enumerate(neg_page.strip().split('\\n'))\n if \">\" not in line and\n idx < 2 * len(positive_sequences)]\n\n assert len(positive_sequences) == len(negative_sequences), \\\n \"lengths were not the same: p={pos} n={neg}\" \\\n .format(pos=len(positive_sequences), neg=len(negative_sequences))\n\n positive_vector_matrix = np.array([create_feature_vector_for_sequence(s)\n for s in positive_sequences])\n negative_vector_matrix = np.array([create_feature_vector_for_sequence(s)\n for s in negative_sequences])\n\n df = pd.DataFrame(data=np.vstack((positive_vector_matrix,\n negative_vector_matrix)))\n df.loc[0:positive_vector_matrix.shape[0], \"Label\"] = 1.0\n df.loc[positive_vector_matrix.shape[0]:, \"Label\"] = 0.0\n\n design_matrix = df\n groups = create_group_indicies_list()\n\n return design_matrix, groups", "def cluster(self,method=\"kmeans\",properties=None,k=3):\n try :\n from sklearn.cluster import KMeans, Ward\n from sklearn import __version__\n except :\n logger.warning(\"install scikits-learning package\")\n return\n X = [] #List of feature vector of each blob\n if not properties:\n properties = ['color','shape','position']\n if k > len(self):\n logger.warning(\"Number of clusters cannot be greater then the number of blobs in the featureset\")\n return\n for i in self:\n featureVector = []\n if 'color' in properties:\n featureVector.extend(i.mAvgColor)\n if 'shape' in properties:\n featureVector.extend(i.mHu)\n if 'position' in properties:\n featureVector.extend(i.extents())\n if not featureVector :\n logger.warning(\"properties parameter is not specified properly\")\n return\n X.append(featureVector)\n\n if method == \"kmeans\":\n \n # Ignore minor version numbers.\n sklearn_version = re.search(r'\\d+\\.\\d+', __version__).group()\n \n if (float(sklearn_version) > 0.11):\n k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)\n else:\n k_means = KMeans(init='random', k=k, n_init=10).fit(X)\n KClusters = [ FeatureSet([]) for i in range(k)]\n for i in range(len(self)):\n KClusters[k_means.labels_[i]].append(self[i])\n return KClusters\n\n if method == \"hierarchical\":\n ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)\n WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]\n for i in range(len(self)):\n WClusters[ward.labels_[i]].append(self[i])\n return WClusters", "def main():\n data = Dummy(n_samples=500, n_dim=3)\n X = data.get_dummy()\n clustering = Kmeans(X, K=5, display=False)\n clustering.run()\n print(f\"Number of iterations: {clustering.num_iterations}\\n\")\n\n \"\"\" Test example of clustering_kmeans with unknown number of clusters K \"\"\"\n clustering = Kmeans(X,)\n clustering.silhouette_find_k()\n print(f\"Number of centroids found: {clustering.num_K}\")", "def fitness(individual, n_clusters=3, n_seeds=5):\n\n dataframe = common.scale_dataframe(individual)\n corr = abs(individual.dataframe.corr().iloc[0, 1])\n differences = []\n for seed in range(n_seeds):\n 
km = KMeans(n_clusters, random_state=seed).fit(dataframe)\n differences.append(silhouette_score(dataframe, km.labels_) - corr)\n\n best = max(differences)\n return best", "def prepare_experiment_data_embedded(protein=0, fingerprint=4, K=15, \\\n n_folds=10, max_hashes=300, seed=0, limit=None,\n representation_version = STANDARD,\n collect_statistics=False, experimental=False, calculate_folds=range(10)):\n\n # Load and transform data to buckets\n X, Y = load_svmlight_file(os.path.join(c[\"DATA_DIR\"], proteins[protein]+\"_\"+fingerprints[fingerprint]+\".libsvm\"))\n X = set_representation_by_buckets(X)\n\n\n # Construct sequential lsh indexes labeled by row id. We need those because distance distribution is\n # not well gaussian\n # lsh_thresholds= [0.3,0.4,0.45, 0.5, 0.55, 0.6, 0.65,0.7,0.75, 0.8,0.9]\n\n print \"Constructing lsh_indexes\"\n #lsh_indexes = [construct_LSH_index(protein=protein, fingerprint=fingerprint, threshold=t, max_hashes=max_hashes\\\n # ) for t in lsh_thresholds]\n #\n # if experimental:\n # # Try this\n lsh_indexes = [construct_LSH_index(protein=protein, fingerprint=fingerprint, threshold=i, max_hashes=max_hashes) \\\n for i in np.linspace(0.2, 0.9, 14)]\n #\n # else:\n\n\n #lsh_thresholds= [0.3, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65,0.7, 0.8, 0.9]\n\n\n print \"Constructing lsh_indexes\"\n\n\n #lsh_indexes = [construct_LSH_index(protein=protein, fingerprint=fingerprint, threshold=t, max_hashes=max_hashes\\\n # ) for t in lsh_thresholds]\n\n # Prepare folds ids\n print \"Constructing fold indexes\"\n folds_idx = construct_folds(protein=protein, fingerprint=fingerprint, n_folds=n_folds, seed=seed)\n folds = []\n\n\n\n statistics_len_split = []\n statistics_len_candidates = []\n whole_scans = [0]\n picked_threshold = [0]\n\n # Collected if experimental is ON\n dists_pos_pos = []\n dists_pos_neg = []\n dists_neg_pos = []\n dists_neg_neg = []\n\n # Constructing folds\n for fold_id in calculate_folds:\n fold = folds_idx[fold_id]\n tr_id, ts_id = fold[\"train_id\"], fold[\"test_id\"]\n\n Y_train, Y_test = Y[tr_id], Y[ts_id]\n tr_id_set, ts_id_set = set(tr_id), set(ts_id) #sets for fast filtering\n\n X_train_lsh, X_test_lsh = [], [] # We will construct them row by row\n\n @timed\n def construct_embedding(source, target):\n # Construct train data\n\n itr = source\n\n if limit != None:\n itr = itertools.islice(source, limit)\n\n for row_idx in itr:\n # Query LSHs\n candidates = [list(index.match(X[row_idx].nonzero()[1], label=row_idx)) for index in lsh_indexes]\n\n # Pick closest\n best = []\n best_err = float('inf')\n best_id = -1\n for id, c in enumerate(reversed(candidates)):\n if len(c) > 2*K and abs(len(c) - 2*K) < best_err:\n best_err = abs(len(c) - 2*K)\n best = list(c)\n best_id=id\n\n if len(c) > 2.5*K:\n # this is an imporant heuristic - if it is a reasonably big set accept bigger threshold\n best = list(c)\n best_id=id\n break\n\n picked_threshold.append(best_id)\n\n # Basic filtering (we can look only at training examples)\n candidates = [c for c in best if c != row_idx and c in tr_id_set]\n\n statistics_len_candidates.append(len(candidates))\n\n candidates_sims = []\n\n if len(best): # <=> we found a good LSH threshold\n # Caching result\n candidates_sims = np.array([jaccard_similarity_score_fast(X[row_idx], X[idx]) \\\n for idx in candidates])\n else:\n candidates_sims = np.array([jaccard_similarity_score_fast(X[row_idx], X[idx]) \\\n for idx in tr_id])\n candidates = tr_id\n whole_scans[0] += 1\n\n # Sort and get K closests in relative indexes (for fast query, 
optimization)\n candidates_relative = sorted(range(len(candidates))\\\n , key=lambda idx: -candidates_sims[idx] )[0:K] # decreasing by default so reverse\n\n # Get dists\n candidates_pos_dists = np.array([candidates_sims[idx] for idx in candidates_relative if Y[candidates[idx]]==1])\n candidates_neg_dists = np.array([candidates_sims[idx] for idx in candidates_relative if Y[candidates[idx]]==-1])\n\n if collect_statistics:\n if Y[row_idx] == 1:\n dists_neg_pos.append(candidates_neg_dists)\n dists_pos_pos.append(candidates_pos_dists)\n\n if Y[row_idx] == -1:\n dists_neg_neg.append(candidates_neg_dists)\n dists_pos_neg.append(candidates_pos_dists)\n\n if representation_version == FIX_SCALING:\n target.append([len(candidates_pos_dists)/(0.1 + float(len(candidates_relative))), \\\n len(candidates_neg_dists)/(0.1 + float(len(candidates_relative))), \\\n candidates_pos_dists.mean() if len(candidates_pos_dists) else 0.0, \\\n candidates_pos_dists.min() if len(candidates_pos_dists) else 0.0, \\\n candidates_pos_dists.max() if len(candidates_pos_dists) else 0.0,\\\n candidates_neg_dists.mean() if len(candidates_neg_dists) else 0.0, \\\n candidates_neg_dists.max() if len(candidates_neg_dists) else 0.0, \\\n candidates_neg_dists.min() if len(candidates_neg_dists) else 0.0] )\n elif representation_version == STANDARD:\n target.append([len(candidates_pos_dists), \\\n len(candidates_neg_dists), \\\n candidates_pos_dists.mean() if len(candidates_pos_dists) else 0.0, \\\n candidates_pos_dists.min() if len(candidates_pos_dists) else 0.0, \\\n candidates_pos_dists.max() if len(candidates_pos_dists) else 0.0,\\\n candidates_neg_dists.mean() if len(candidates_neg_dists) else 0.0, \\\n candidates_neg_dists.max() if len(candidates_neg_dists) else 0.0, \\\n candidates_neg_dists.min() if len(candidates_neg_dists) else 0.0] )\n\n construct_embedding(list(tr_id), X_train_lsh)\n print \"Calculating \",protein, fingerprint\n construct_embedding(list(ts_id), X_test_lsh)\n\n folds.append({\"X_train\": np.array(X_train_lsh), \"X_test\":np.array(X_test_lsh), \"Y_train\":Y_train, \"Y_test\":Y_test})\n\n return {\"folds\":folds, \"folds_idx\":folds_idx, \\\n \"len_candidates\":statistics_len_candidates, \"len_split\": statistics_len_split,\\\n \"whole_scans\": whole_scans[0], \"picked_threshold\":picked_threshold,\n }, {\"examples\": X.shape[0]}", "def clusters_based_on_LoS():\n algorithm = 'LoS'\n tree_filename = '../data/pickle/pneumonia_tree_without_electrolytes_min_supp_0_05.pickle'\n entity_list = read_json('../data/json/pneumonia_entity_list.json')\n mat = prepare_matrix(tree_filename, len(entity_list))\n\n length_of_stays = length_of_stay('../csv/pneumonia_admissions.csv', show_plot=False)\n borders = (7, 15, 30)\n groups = make_groups(length_of_stays, borders)\n\n labels = np.zeros(len(entity_list), dtype='int')\n labels[groups[0]] = 0\n labels[groups[1]] = 1\n labels[groups[2]] = 1\n labels[groups[3]] = 1\n\n visualize_clusters_in_2D(mat, labels, algorithm, None, show_annotations=False)", "def findClusters(data):\n\tcentroids = data[0, None]\n\tmin_R = 0.1\n\n\tfor _ in range(8):\n\t\tdists = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=-1)\n\t\tpotentials = (1 / dists).sum(axis=1)\n\n\t\tnew_c_idx = np.argmin(potentials)\n\n\t\tif np.min(dists[new_c_idx]) < min_R:\n\t\t\t# if this is close to an existing centroid, stop finding centroids\n\t\t\tbreak\n\n\t\tcentroids = np.concatenate([centroids, data[new_c_idx, None]], axis=0)\n\n\tax.scatter(*centroids.T, color='tab:orange')\n\n\t# run a single 
k-means to find the centroid of each cluster\n\tk = centroids.shape[0]\n\tdists = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=-1)\n\tclosest_centroid = np.argmin(dists, axis=-1)\n\n\tfor n in range(k):\n\t\tnew_centroid = data[closest_centroid == n].mean(axis=0)\n\t\tcentroids[n] = new_centroid\n\tprint(centroids)\n\tax.scatter(*centroids.T, color='tab:blue')", "def cluster_membership_occupancy(data):\n \n \n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n\n if n_clusters == 0:\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features()]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters ==1:\n #obtain_total_cluster_areas_set_everything_else_to_default\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n \n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0]\n \n Total_cluster_area=np.sum(cluster_chull_areas)\n areas=[Cluster_Area_Features([Total_cluster_area,0,0,0,0,0,0,0,0])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters >1:\n #Summarizing the cluster membership distribution characteristics\n cluster_size_nums=np.delete(np.array(data.groupby(['clusters']).size()),0)\n (cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD)= distribution_statistics(cluster_size_nums)\n\n #For each cluster calculate the area by calculating the area of the convex hull of cluster members\n # Note: concavehull implementation here might be a good addition as it will provide more imformative values. 
\n\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0,0,0]\n \n\n (avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area)= distribution_statistics(cluster_chull_areas)\n Total_cluster_area=np.sum(cluster_chull_areas)\n\n #Calculate cluster density: number of nuclei/ convex area of cluster\n cluster_density=np.divide(cluster_size_nums,cluster_chull_areas)\n (avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density)= distribution_statistics(cluster_density)\n\n #return dataframe of features\n membership=[Cluster_Membership_Features([cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD])]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features([Total_cluster_area,\n avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features([avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density])]\n density = pd.DataFrame([o.__dict__ for o in density])\n\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n return all_features", "def calc_skill_clusters(blocked_days, GTD, GTD_seas, persis_thresh, SOM_nodes, SOM_clusters_block, seas):\r\n ds_arr_ones = []\r\n for clus in SOM_clusters_block:\r\n node_cluster_set_test = [clus]\r\n node_cluster_set_test_str = [str(clus).replace(',', '') for clus in node_cluster_set_test]\r\n #calculate the blocked days which the new cluster determines\r\n blocked_days_clus = calc_blocked_days_clus(blocked_days, persis_thresh, SOM_nodes, node_cluster_set_test_str)\r\n #define as DataArray and select JJA to remove the extended days included for classifying blocks\r\n blocked_days_clus_xr = xr.DataArray(blocked_days_clus, name = \"blocking\", dims={\"time\": GTD['time']})\r\n blocked_days_clus_xr['time'] = GTD['time']\r\n blocked_days_clus_seas = blocked_days_clus_xr.sel(time=np.isin(blocked_days_clus_xr['time.season'], seas))\r\n prec, recall, F1 = calc_pr_rc_F1(GTD_seas, blocked_days_clus_seas)\r\n #calculate precision, recall and F1\r\n if len(str(node_cluster_set_test)) == 1:\r\n comb_str = f\"{node_cluster_set_test[0]}\".replace(\"'\", \"\")\r\n else:\r\n comb_str = f\"{str(node_cluster_set_test)[1:-1]}\".replace(\"'\", \"\") \r\n ds=xr.Dataset({'precision': prec, 'recall': recall, 'F1': F1, 'clus_num': int(len(node_cluster_set_test)), 'set': str(comb_str)})\r\n ds_arr_ones.append(ds)\r\n blocks_one_clusnum = xr.concat(ds_arr_ones, dim = \"set\")\r\n return blocks_one_clusnum", "def generate_clusters(df):\n\n df_size = df.shape[0]\n print(df_size)\n n_clusters = 0\n percent_min_pts = 0.105\n min_clusters = 3\n while (n_clusters != min_clusters):\n print(\"percent_min_pts\", percent_min_pts)\n min_cluster_pts = math.floor(df_size * percent_min_pts)\n print(\"min_cluster_pts\", 
min_cluster_pts)\n\n clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_pts)\n print(df.head())\n clusterer.fit(df)\n cluster_groups = {}\n labels = clusterer.labels_\n for i in labels:\n if cluster_groups.get(i):\n cluster_groups[i] = cluster_groups[i] + 1\n else:\n cluster_groups[i] = 1\n print(\"cluster_groups\", cluster_groups)\n n_clusters = len(set(labels))\n print(\"n_clusters\", n_clusters)\n multiplier = abs(n_clusters - min_clusters) * 0.001\n print(\"multiplier\", multiplier)\n if n_clusters > min_clusters:\n percent_min_pts += multiplier\n else:\n percent_min_pts -= multiplier\n print(\"percent_min_pts\", percent_min_pts)\n return labels", "def hierarchical_k_means(X, n_clusters):\n\n n_big_clusters = int(np.sqrt(n_clusters))\n mbk = MiniBatchKMeans(init='k-means++', n_clusters=n_big_clusters, batch_size=1000,\n n_init=10, max_no_improvement=10, verbose=0,\n random_state=0).fit(X)\n coarse_labels = mbk.labels_\n fine_labels = np.zeros_like(coarse_labels)\n q = 0\n for i in range(n_big_clusters):\n n_small_clusters = int(\n n_clusters * np.sum(coarse_labels == i) * 1. / X.shape[0])\n n_small_clusters = np.maximum(1, n_small_clusters)\n mbk = MiniBatchKMeans(init='k-means++', n_clusters=n_small_clusters,\n batch_size=1000, n_init=10, max_no_improvement=10, verbose=0,\n random_state=0).fit(X[coarse_labels == i])\n fine_labels[coarse_labels == i] = q + mbk.labels_\n q += n_small_clusters\n\n return _remove_empty_labels(fine_labels)", "def test_optimalk_cluster_array_vs_data_sizes_error():\n import numpy as np\n from sklearn.datasets.samples_generator import make_blobs\n from gap_statistic import OptimalK\n\n # Create optimalK instance\n optimalK = OptimalK(parallel_backend=None, n_jobs=-1)\n\n # Create data\n X, y = make_blobs(n_samples=5, n_features=2, centers=3)\n\n with pytest.raises(ValueError) as excinfo:\n optimalK(X, cluster_array=np.arange(1, 10))\n assert 'The number of suggested clusters to try' in str(excinfo.value)", "def get_optimal_clusters(cell,threshold=140):\n\n\t#\tTurn image to numpy array\n\tpic = image_to_matrix(cell)\n\n\t#\tGet the array of coordinates of dark dots\n\tdots = get_threshold_dots(pic,threshold)\n\n\tscores = []\n\n\tfor n_clusters in range(1,10):\n\t\tclusters = kmeans.kmeans(pic,pic.shape[0],pic.shape[1],50,n_clusters,threshold)\n\t\tprint clusters\n\n\t\tsquare_sum_array = [0]*n_clusters\n\t\tcount_array = [0]*n_clusters\n\n\t\tfor dot in dots:\n\t\t\tdistance_array = [kmeans.euclid_distance(dot,cluster) for cluster in clusters]\n\t\t\tmin_index = distance_array.index(min(distance_array))\n\t\t\tsquare_sum_array[min_index] += kmeans.euclid_distance(clusters[min_index],dot)\n\t\t\tcount_array[min_index] += 1\n\n\t\tvariances = [square_sum/(count+0.001) for square_sum, count in zip(square_sum_array,count_array)]\n\n\t\tprint variances\n\t\tscores.append(sum(variances)/len(variances))\n\n\treturn scores", "def byMedoids(dataset, number_of_clusters, class_header=\"Class\", verbosity=0, return_clusters=False):\n medoids = dataset.sample(number_of_clusters) # randomly select medoids from dataset\n\n if verbosity >= 1:\n print(\"INITIAL MEDOIDS\")\n print(medoids)\n if verbosity >= 2:\n print(\"DATAFRAME DATASET\")\n print(dataset)\n\n for iterations in range(MAX_ITERATIONS): # Loop until MAX_ITERATIONS or settled\n if verbosity >= 1:\n print(\"ITERATIONS\")\n print(iterations)\n\n clusters = Cluster.calcClusters(dataset, medoids, number_of_clusters, verbosity=verbosity,\n class_header=class_header) # Assign all points to a cluster\n\n 
base_distortion = Cluster.calcDistortion(medoids, clusters, class_header=class_header)\n # Find base distortion\n\n set_list = [] # set up multiprocessing structures\n work_list = []\n change_list = []\n\n for medoid_row_index, medoid_tuple in enumerate(medoids.iterrows()): # For each medoid\n medoid_frame_index = medoid_tuple[0]\n for datum_index, datum in clusters[medoid_row_index].iterrows(): # For each point in the medoid cluster\n if medoid_frame_index != datum_index: # Do not try to swap a medoid with itself\n temp = medoids.copy() # Make a copy of the medoids DataFrame\n temp.iloc[medoid_row_index] = datum # Swap the medoid in the copy\n temp.index.values[medoid_row_index] = datum.name\n work_list.append((temp, clusters, class_header)) # add calculation arguments to work list\n change_list.append((medoid_row_index, datum)) # add swap info to change list\n\n multiprocess_count = multiprocessing.cpu_count() # Find cpu count\n partition_size = math.ceil(len(work_list) / multiprocess_count) # find size of work list partitions\n if verbosity >= 1: # optionally print work list length\n print(\"Work list length:\")\n print(len(work_list))\n for i in range(multiprocess_count - 1): # repeat for every subset\n sample = work_list[i * partition_size: (i + 1) * partition_size] # take a subset of the work list\n set_list.append(sample) # add that subset as an item in the set list\n set_list.append((work_list[(multiprocess_count - 1) * partition_size:])) # add tailing subset to set list\n if verbosity > 2: # optionally print entire set list.\n print(\"Set list\")\n print(set_list)\n pool = multiprocessing.Pool(processes=multiprocess_count) # create multiprocessing pool\n distortion_lists = pool.map(Cluster.calcDistortionList, set_list) # map set list to processing pool\n pool.close()\n pool.join()\n #print(distortion_lists)\n distortions = sum(distortion_lists, [])\n #print(distortions)\n\n break_flag = True # set break flag in case there are no good changes\n distortion_index = 0\n for medoid_row_index, _ in enumerate(medoids.iterrows()): # For each medoid\n cluster_size = len(clusters[medoid_row_index])\n distortions_subset = distortions[distortion_index: distortion_index + cluster_size]\n distortion_index += cluster_size # keep track of how far we are through the change list\n if len(distortions_subset) != 0: # did this cluster have any possible changes\n best_distortion = min(distortions_subset) # pick the best distortion\n if best_distortion < base_distortion: # if that distortion is better than our old distortion\n best_dist_index = distortions.index(best_distortion)\n best_change = change_list[best_dist_index] # apply the change for that distortion.\n else:\n best_change = None\n else:\n best_change = None\n if verbosity > 0: # Optionally print best changes\n print(\"MEDOIDS\")\n print(medoids)\n print(\"BEST_CHANGE\")\n print(best_change)\n if best_change is not None: # make sure there is a change before trying to make it.\n medoids.iloc[best_change[0]] = best_change[1] # swap best change into medoids list\n medoids.index.values[best_change[0]] = best_change[1].name\n break_flag = False\n\n if break_flag: # if we made no changes then the clustering is settled.\n break\n\n medoids = medoids.drop_duplicates() # make sure we do not duplicate medoids\n if return_clusters is True: # optionally return clusters\n return medoids, clusters\n pass\n else:\n return medoids # return medoids dataframe", "def _fit(\n self,\n x,\n clusters=50,\n a=5,\n Niter=15,\n device=None,\n backend=None,\n 
approx=False,\n n=50,\n ):\n if type(clusters) != int:\n raise ValueError(\"Clusters must be an integer\")\n if clusters >= len(x):\n raise ValueError(\"Number of clusters must be less than length of dataset\")\n if type(a) != int:\n raise ValueError(\"Number of clusters to search over must be an integer\")\n if a > clusters:\n raise ValueError(\n \"Number of clusters to search over must be less than total number of clusters\"\n )\n if len(x.shape) != 2:\n raise ValueError(\"Input must be a 2D array\")\n if self.__normalise:\n x = x / self.tools.repeat(self.tools.norm(x, 2, -1), x.shape[1]).reshape(\n -1, x.shape[1]\n )\n\n # if we want to use the approximation in Kmeans, and our metric is angular, switch to full angular metric\n if approx and self.__metric == \"angular\":\n self.__update_metric(\"angular_full\")\n\n x = self.tools.contiguous(x)\n self.__device = device\n self.__backend = backend\n\n cl, c = self.tools.kmeans(\n x,\n self.__distance,\n clusters,\n Niter=Niter,\n device=self.__device,\n approx=approx,\n n=n,\n )\n\n self.__c = c\n cl = self.__assign(x)\n\n ncl = self.__k_argmin(c, c, k=a)\n self.__x_ranges, _, _ = cluster_ranges_centroids(x, cl)\n\n x, x_labels = self.__sort_clusters(x, cl, store_x=True)\n self.__x = x\n r = self.tools.repeat(self.tools.arange(clusters, device=self.__device), a)\n self.__keep = self.tools.to(\n self.tools.zeros([clusters, clusters], dtype=bool), self.__device\n )\n self.__keep[r, ncl.flatten()] = True\n\n return self", "def run(\n self,\n number_of_clusters=None,\n max_K=8,\n method_clustering=\"pam\",\n init_clustering=\"random\",\n max_iter_clustering=100,\n discart_value_JI=0.6,\n bootstraps_JI=100,\n bootstraps_p_value=100,\n n_jobs=1,\n verbose=1,\n ):\n\n if number_of_clusters is None:\n self.k = optimizer.optimizeK(\n self.distance_matrix,\n self.y.to_numpy(),\n self.model_type,\n max_K,\n method_clustering,\n init_clustering,\n max_iter_clustering,\n discart_value_JI,\n bootstraps_JI,\n self.random_state,\n n_jobs,\n verbose,\n )\n\n if self.k == 1:\n warnings.warn(\"No stable clusters were found!\")\n return\n\n print(f\"Optimal number of cluster is: {self.k}\")\n\n else:\n self.k = number_of_clusters\n print(f\"Use {self.k} as number of cluster\")\n\n self.cluster_labels = (\n kmedoids.KMedoids(\n n_clusters=self.k,\n method=method_clustering,\n init=init_clustering,\n metric=\"precomputed\",\n max_iter=max_iter_clustering,\n random_state=self.random_state,\n )\n .fit(self.distance_matrix)\n .labels_\n )\n\n (\n self._data_clustering_ranked,\n self.p_value_of_features,\n ) = stats.calculate_global_feature_importance(\n self.X, self.y, self.cluster_labels, self.model_type\n )\n self._p_value_of_features_per_cluster = (\n stats.calculate_local_feature_importance(\n self._data_clustering_ranked, bootstraps_p_value\n )\n )", "def fit_predict(self, indexes, dataset_obj, sample_weight=None, sort_by_distance_to_mean=False):\n\n # Query data\n query_data = dataset_obj.data_matx[dataset_obj.query_idx]\n query_ids = dataset_obj.query_idx\n # Gallery data\n gallery_data = dataset_obj.data_matx[indexes]\n gallery_ids = indexes\n\n logging.info('Finding cluster mean positions.')\n # Fitted is the gallery id cluster labels in order\n fitted = sk_kmeans.fit_predict(\n self, dataset_obj.data_matx[indexes], None, sample_weight=sample_weight)\n logging.info('Done')\n cluster_means = self.cluster_centers_\n # Cluster ids for each different class\n cluster_ids = [[x for x in range(len(cluster_means))] for i in range(len(query_ids))]\n\n # Measure 
distances to cluster centres\n cluster_distance_matrix = pairwise_distances(query_data, cluster_means, metric=self.metric)\n\n cluster_ids_swapped = swap_indices(cluster_ids)\n\n cluster_gallery_ids = []\n cluster_gallery_data = []\n for cluster in range(len(cluster_ids_swapped)):\n valid_cluster_gallery_ids = gallery_ids[fitted == cluster]\n valid_cluster_gallery_data = dataset_obj.data_matx[valid_cluster_gallery_ids]\n cluster_gallery_ids.append(valid_cluster_gallery_ids)\n cluster_gallery_data.append(valid_cluster_gallery_data)\n\n gallery_distances_per_cluster = []\n for cluster in cluster_gallery_data:\n # Take only the gallery ids in the cluster\n gallery_distance_for_cluster = pairwise_distances(query_data, cluster, metric=self.metric)\n gallery_distances_per_cluster.append(gallery_distance_for_cluster)\n\n gallery_distances_per_cluster_swapped = swap_indices(gallery_distances_per_cluster) \n\n cluster_gallery_ids_stacked = [cluster_gallery_ids for i in range(len(gallery_distances_per_cluster_swapped))]\n\n sorted_gallery_distances_per_query = []\n sorted_gallery_ids_per_query = []\n for cluster_distances, gallery_distances, gallery_ids, index in zip(cluster_distance_matrix, gallery_distances_per_cluster_swapped, cluster_gallery_ids_stacked, range(len(cluster_distance_matrix))):\n sorted_gallery_distances_per_query.append(sort_by_another(gallery_distances, cluster_distances))\n sorted_gallery_ids_per_query.append(sort_by_another(gallery_ids, cluster_distances))\n\n num_query_items = len(sorted_gallery_distances_per_query)\n num_clusters = len(gallery_ids)\n num_gallery_items = len(gallery_data)\n\n double_sorted_gallery_distances_per_query = [[] for i in range(num_query_items)]\n double_sorted_gallery_ids_per_query = [[] for i in range(num_query_items)]\n for query_item, query_item_id, index1 in zip(sorted_gallery_distances_per_query, sorted_gallery_ids_per_query, range(len(sorted_gallery_distances_per_query))):\n for cluster, cluster_id, index2 in zip(query_item, query_item_id, range(len(query_item))):\n sorted_gallery_distances = sort_by_another(cluster, cluster)\n sorted_gallery_ids = sort_by_another(cluster_id, cluster)\n double_sorted_gallery_distances_per_query[index1].append(sorted_gallery_distances)\n double_sorted_gallery_ids_per_query[index1].append(sorted_gallery_ids)\n\n final_distance_array = []\n final_ids_array = []\n for distances, indexes in zip(double_sorted_gallery_distances_per_query, double_sorted_gallery_ids_per_query):\n final_distance_array.append([item for sublist in distances for item in sublist])\n final_ids_array.append([item for sublist in indexes for item in sublist])\n\n final_distance_array = np.array(final_distance_array)\n final_ids_array = np.array(final_ids_array)\n\n final_updated_distance_array = []\n final_updated_ids_array = []\n for distances, indexes, query_id in zip(final_distance_array, final_ids_array, range(num_query_items)):\n mask = [id_is_valid(gal_id, query_id, dataset_obj) for gal_id in indexes]\n redone_distances = np.append(distances[mask], ([-1] * 20))[:num_gallery_items]\n redone_indexes = np.append(indexes[mask], ([-1] * 20))[:num_gallery_items]\n final_updated_distance_array.append(redone_distances)\n final_updated_ids_array.append(redone_indexes)\n\n final_updated_distance_array = np.array(final_updated_distance_array)\n final_updated_ids_array = np.array(final_updated_ids_array)\n\n def gal_to_label(row_of_ids):\n return dataset_obj.labels[row_of_ids]\n\n final_updated_labels_array = np.stack([gal_to_label(row) for row in 
final_updated_ids_array])\n tensor_array = torch.tensor(np.array(final_updated_labels_array, dtype=np.int32))\n\n ranks = torch.stack([get_rank(row, i, dataset_obj) for i, row in enumerate(tensor_array)]).numpy()\n ranked_count = np.bincount(ranks.flatten())[1:-1]\n # CMC curve (percentage of query items which were in any particular rank or below)\n self.ranked_acc = np.cumsum(ranked_count / dataset_obj.query_idx.shape[0])\n\n return self", "def find_clusters():\n clusters = ecs_client.list_clusters()['clusterArns']\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n logging.debug(\"Retrieved %i clusters\" % (len(clusters)))\n for cluster in clusters:\n ratio = SequenceMatcher(\n lambda item:\n item == \" \",\n \"arn:aws:ecs:us-east-1*cluster/default\",\n cluster\n ).ratio()\n if ratio < 0.82:\n cluster_short = cluster.split(\"/\")[1]\n if args.cluster and cluster_short != args.cluster:\n continue\n ecs_data[cluster_short] = {}\n logging.debug(\"Cluster: %s\" % (cluster))\n instance_arns = ecs_client.list_container_instances(\n cluster=cluster\n )['containerInstanceArns']\n instances = ecs_client.describe_container_instances(\n cluster=cluster,\n containerInstances=instance_arns\n )['containerInstances']\n logging.debug(\"Retrieved %i cluster instances\" % (len(instances)))\n for instance in instances:\n ecs_data[cluster_short][instance['ec2InstanceId']] = {\n 'instance_id': instance['ec2InstanceId'],\n 'cluster': cluster_short,\n 'containers': []\n }\n logging.debug(\"\\tLooking for tasks in (%s): %s %s\" % (instance_data[instance['ec2InstanceId']]['name'], instance_data[instance['ec2InstanceId']]['id'], instance['containerInstanceArn']))\n tasks = ecs_client.list_tasks(\n cluster=cluster,\n containerInstance=instance['containerInstanceArn'],\n )['taskArns']\n logging.debug(\"Retrieved %i cluster tasks\" % (len(tasks)))\n for task in tasks:\n containers = ecs_client.describe_tasks(\n cluster=cluster,\n tasks=[task]\n )['tasks']\n for container in containers:\n if args.action != \"list\":\n if container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0] == args.task:\n if args.action == \"ssh\":\n if args.random:\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"sshing to %s\" % (instance['ec2InstanceId']))\n print('*** Initiating Host Interactive Session\\n')\n interactive().connect(instance_data[instance['ec2InstanceId']]['private_ip'],'')\n sys.exit(0)\n if args.action == \"enter\":\n if args.random:\n logging.debug(\"Recording host %s for random selection\" % (instance['ec2InstanceId']))\n hosts.append(instance['ec2InstanceId'])\n else:\n logging.debug(\"connect to %s -> %s\" % (instance['ec2InstanceId'],container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0]))\n print '*** Initiating Container Interactive Session\\n'\n interactive().docker_enter(args.user, instance_data[instance['ec2InstanceId']]['private_ip'],args.task)\n sys.exit(0)\n if args.action == \"list\":\n logging.debug(\"%s matched arg(%s): %s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.action, instance['ec2InstanceId']))\n ecs_data[cluster_short][instance['ec2InstanceId']]['containers'].append(container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0])\n # logging.info(\"%s:%s\" % (container['taskDefinitionArn'].split(\"/\")[1].split(\":\")[0], args.task))\n return True", "def cluster_by_split(filtered_df):\n global features_in_range\n global table\n # make a copy of the entire data set\n unfiltered_df = 
table\n # get total number of robot faces in data set\n total_rows = len(unfiltered_df)\n\n # drop any column that is not included in our list of 11 features\n # 11 features = 16 features with no dependencies filtered via 20-80% range\n for col in unfiltered_df:\n if not unfiltered_df[col].name in features_in_range:\n unfiltered_df = unfiltered_df.drop(unfiltered_df[col].name, 1)\n\n # iterate over the dataframe of columns generated by the range\n for col in filtered_df:\n try:\n # for each column, call groupby() and calculate percentage\n check_for_20 = unfiltered_df.groupby(col).size().reset_index(name='count')\n check_for_20['as_percent'] = 100 * check_for_20['count'] / float(total_rows)\n # ignore feature values that represent less than 20% of all faces\n cluster_by_feature = check_for_20[check_for_20['as_percent'] >= 20]\n # if feature has values over 20%, iterate over\n # each feature_value and generate clusters\n if not cluster_by_feature.empty:\n # iterate over every value of the feature\n for index, row in cluster_by_feature.iterrows():\n # use feature value to call groupby() on the entire data set\n results = unfiltered_df[unfiltered_df[col] == row[0]]\n results = results \\\n .groupby(list(unfiltered_df)) \\\n .size() \\\n .reset_index(name='count')\n # calculate count as a percentage\n results['as_percent'] = 100 * results['count'] / float(total_rows)\n results = results.sort_values(by='as_percent', ascending=False)\n # store results in a .tsv file\n filename = str(col) + \"_\" + str(row[0]) + '_feature_cluster.tsv'\n results.to_csv(filename.replace(\"/\", \"-\"), header=True, sep='\\t')\n print(\"results written to file\")\n except:\n # 'count' and 'percentage' columns will generate errors\n # since they don't exist in the original data set\n pass", "def problem2(dataset_path):\n\n # to achieve this, we use Silhouette Index\n km = KMeans(init=\"k-mean++\", csv_path=dataset_path, n_init=5)\n\n dfs = []\n cs = []\n for i in range(2, 9):\n cs.append(i)\n km.n_clusters = i\n dfs.append(km.fit_predict_from_csv())\n\n iv = InternalValidator(dfs, cluster_nums=cs)\n iv.make_silhouette_table()\n iv.show_silhouette_plot()\n\n iv.make_cvnn_table()\n iv.show_cvnn_plot()", "def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n 
clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs 
name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")", "def optimalK(data, nrefs=3, maxClusters=15):\r\n gaps = np.zeros((len(range(1, maxClusters)),))\r\n resultsdf = pd.DataFrame({'clusterCount':[], 'gap':[]})\r\n for gap_index, k in enumerate(range(1, maxClusters)):\r\n\r\n # Holder for reference dispersion results\r\n refDisps = np.zeros(nrefs)\r\n\r\n # For n references, generate random sample and perform kmeans getting resulting dispersion of each loop\r\n for i in range(nrefs):\r\n\r\n # Create new random reference set\r\n randomReference = np.random.random_sample(size=data.shape)\r\n\r\n # Fit to it\r\n km = KMeans(k)\r\n km.fit(randomReference)\r\n\r\n refDisp = km.inertia_\r\n refDisps[i] = refDisp\r\n\r\n # Fit cluster to original data and create dispersion\r\n km = KMeans(k)\r\n km.fit(data)\r\n print(k)\r\n\r\n origDisp = km.inertia_\r\n\r\n # Calculate gap statistic\r\n gap = np.log(np.mean(refDisps)) - np.log(origDisp)\r\n\r\n # Assign this loop's gap statistic to gaps\r\n gaps[gap_index] = gap\r\n\r\n resultsdf = resultsdf.append({'clusterCount':k, 'gap':gap}, ignore_index=True)\r\n\r\n\r\n return (gaps.argmax() + 1, resultsdf) # Plus 1 because index of 0 means 1 cluster is optimal, index 2 = 3 clusters are optimal\r", "def integrated_clustering(t_all,y_all,num_of_days=500,period = 1440,trim=10,min_n_clusters = 4, max_n_clusters=10,hierarchical=0):\n\n\n\n all_seg_april = initial_disaggregate(t_all,y_all,num_of_days,period = period)\n \n ''' '''\n all_seg_april_normalized = [np.array(x[0])-np.mean(x[1]) for x in all_seg_april if len(x[1])==3]\n \n ''' filter the empty segments'''\n all_seg_april_normalized = [x for x in all_seg_april_normalized if len(x)>0]\n \n ''' clustering in different ranges will probably have a better result'''\n if 
hierarchical == 0:\n pass\n elif hierarchical ==1:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()>1000]\n else:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()<1000]\n \n ''' filter out the positive segments'''\n all_positive_seg_april_normalized = [x for x in all_seg_april_normalized if x.min()>0]\n \n \n all_seg_april_normalized_trim50 = extract_first_n(all_positive_seg_april_normalized, trim)\n cluster_average = []\n \n # find optimal clustering number using silhouette score\n \n optimal_dict = {}\n \n for n_clusters in range(min_n_clusters,max_n_clusters):\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n\n # sihouette score\n cluster_labels = y_pred\n sample_silhouette_values = silhouette_samples(all_seg_april_normalized_trim50, cluster_labels)\n \n silhouette_avg = silhouette_score(pd.DataFrame(all_seg_april_normalized_trim50), cluster_labels)\n\n optimal_dict[n_clusters] = silhouette_avg +(sample_silhouette_values.min()+sample_silhouette_values.max())/2\n \n # n_clusters will give us the optimal number of clusters\n n_clusters = max(optimal_dict.iteritems(), key=operator.itemgetter(1))[0]\n\n #print n_clusters\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n \n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n cluster_average_rank = np.argsort(cluster_average)[::-1]\n rank_map = {cluster_average_rank[i_cluster]:i_cluster for i_cluster in range(n_clusters)} # old index:new index\n\n y_pred_old = y_pred\n y_pred = [rank_map[x] for x in y_pred]\n all_seg_per_cluster = [[] for i in range(n_clusters) ]\n for i_seg in range(len(all_seg_april_normalized_trim50)):\n all_seg_per_cluster[y_pred[i_seg]].append(all_seg_april_normalized_trim50[i_seg])\n \n cluster_mean = [[] for i in range(n_clusters) ]\n cluster_std = [[] for i in range(n_clusters) ]\n for i_cluster in range(n_clusters):\n cluster_mean[ i_cluster ] = np.mean(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n cluster_std[ i_cluster ] = np.std(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n \n \n \n \n #cluster_mean_2 = cluster_mean[5:6]\n \n return cluster_mean,cluster_std,n_clusters,all_seg_per_cluster", "def cluster(players_df, columns):\n\toptimal_n=None\n\toptimal_clusters=None\n\toptimal_clusterer=None\n\toptimal_silhouette=-99\n\tfor n in range(2,9):\n\t\tclusterer=KMeans(n_clusters=n)\n\t\tcluster_labels=clusterer.fit_predict(players_df[columns])\n\t\tavg_silhouette=silhouette_score(players_df[columns], cluster_labels)\n\t\tprint('The avg silhouette score for {} clusters is {}'.format(n, avg_silhouette))\n\t\tif avg_silhouette > optimal_silhouette:\n\t\t\toptimal_silhouette=avg_silhouette\n\t\t\toptimal_clusterer=clusterer\n\t\t\toptimal_clusters=cluster_labels\n\t\t\toptimal_n=n\n\tprint('Returning optimal clusters found with n={}'.format(optimal_n))\n\tclusters = {n: [] for n in range(optimal_n)}\n\tfor i, label in 
enumerate(optimal_clusters):\n\t\tclusters[label].append(\n\t\t\tdict(\n\t\t\t\tplayer_id=players_df.iloc[i]['PERSON_ID'],\n\t\t\t\tfirst_name=players_df.iloc[i]['DISPLAY_LAST_COMMA_FIRST'].split()[-1],\n\t\t\t\tlast_name=players_df.iloc[i]['DISPLAY_LAST_COMMA_FIRST'].split()[0],\n\t\t\t\t)\n\t\t\t)\n\treturn clusters", "def get_number_of_clusters(df, use_pca, n_components):\n n_clusters = 10\n cluster_with_distances = []\n for i in range(n_clusters):\n pipe = _build_model(df, use_pca, n_components, use_kmeans=True, n_clusters=i + 1)\n cluster_with_distances.append(pipe.named_steps['kmeans'].inertia_)\n plt.figure(6, figsize=(12, 6))\n plt.plot(range(1, 11), cluster_with_distances, 'o')\n plt.plot(range(1, 11), cluster_with_distances, '-', alpha=0.5)\n plt.title('The Elbow Criterion')\n plt.xlabel('number of cluster')\n plt.ylabel('Sum of squared distances of samples to their closest cluster center')\n plt.show()", "def kmeans_001(fit_centroids=False):\n trainX = np.memmap('data/train_cropped_150.memmap', mode='r', shape=(N_TRAIN, 150, 150, 3))\n # Not used yet\n testX = np.memmap('data/test_cropped_150.memmap', mode='r', shape=(N_TEST, 150, 150, 3))\n\n if fit_centroids:\n km = models.KMeansFeatures.KMeansFeatures(rf_size=6, num_centroids=1600, num_patches=400000)\n km.fit(trainX)\n\n km.save_to_file('mdl_kmeans_ridge_rf_001')\n # t0 = time.time()\n # pickle.dump(km, open('data/kmeans_centroids.pkl', mode='wb'))\n # print 'Pickling the KMeansFeatures object took {0} seconds'.format(time.time() - t0)\n else:\n km = models.KMeansFeatures.KMeansFeatures.load_from_file('mdl_kmeans_ridge_rf_001')\n # km = pickle.load(open('data/kmeans_centroids.pkl'))\n\n n = 10000\n\n train_x = km.transform(trainX[0:n, :])\n train_y = classes.train_solutions.data[0:n, :]\n # train_x = km.transform(trainX)\n # train_y = classes.train_solutions.data\n\n logger.info(\"Train x shape: {}\".format(train_x.shape))\n logger.info(\"Train y shape: {}\".format(train_y.shape))\n\n kf = KFold(n, n_folds=2, shuffle=True)\n\n for train, test in kf:\n # clf = models.Ridge.RidgeRFEstimator()\n # clf.rf_rgn = RandomForestRegressor(n_estimators=250, n_jobs=4, verbose=3)\n clf = RandomForestRegressor(n_estimators=20, n_jobs=4, verbose=3, random_state=0, oob_score=True)\n clf.fit(train_x[train], train_y[train])\n res = clf.predict(train_x[test])\n classes.rmse(train_y[test], res)", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... 
Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings" ]
[ "0.5972204", "0.5650804", "0.5517453", "0.54442674", "0.5379409", "0.53778493", "0.53348505", "0.533013", "0.53299606", "0.53284603", "0.5322139", "0.53043115", "0.5296058", "0.5293254", "0.52804536", "0.5264909", "0.52561975", "0.5254941", "0.5252345", "0.5240298", "0.5240193", "0.5232707", "0.5227994", "0.5219461", "0.52192163", "0.52183765", "0.5198674", "0.51874083", "0.5184178", "0.5159253" ]
0.5868379
1
r"""Plot cluster losses and population histogram using matplotlib.
def plot_cl_losses(self, cl_pop, cl_losses):
    import matplotlib.pyplot as plt  # pylint: disable=import-outside-toplevel

    cl_width = 1
    cl_losses = np.array(cl_losses)
    cl_pop = np.array(cl_pop)
    loss_sort = np.argsort(cl_losses)
    cl_pop = cl_pop[loss_sort]
    cl_losses = cl_losses[loss_sort]
    n_cl = len(cl_pop)
    cl_plot_x = np.array(range(n_cl)) * cl_width
    fig, ax_pop = plt.subplots(nrows=1, ncols=1, **self.kwargs_subplot)
    ax_loss = ax_pop.twinx()
    ax_loss.yaxis.set_ticks_position("left")
    ax_loss.yaxis.set_label_position("left")
    ax_pop.yaxis.set_ticks_position("right")
    ax_pop.yaxis.set_label_position("right")

    # Cluster losses
    ax_loss.set_ylabel(self.loss_func.__name__)
    ax_loss.vlines(
        x=cl_plot_x,
        ymin=0,
        ymax=cl_losses,
        linewidth=0.8,
        color=self.plot_lolli_color,
    )
    ax_loss.scatter(cl_plot_x, cl_losses, s=2, color=self.plot_lolli_color)

    # Losses mean
    ax_loss.axhline(
        np.mean(cl_losses),
        color=self.plot_lolli_color,
        alpha=1,
        linewidth=1.0,
        linestyle=":",
    )
    ax_loss.text(0.5, np.mean(cl_losses), "Mean", fontsize=8)

    # population histogram (bar chart)
    ax_pop.set_xlabel("Cluster")
    ax_pop.set_ylabel("Size")
    edge_shift = cl_width / 2
    edges = [i - edge_shift for i in cl_plot_x] + [cl_plot_x[-1] + edge_shift]
    ax_pop.stairs(
        values=cl_pop,
        edges=edges,
        fill=False,
        baseline=0.0,
        zorder=-1.0,
        edgecolor="lightgrey",
        alpha=1.0,
    )

    # Annotate with cluster index
    if self.plot_annotate_cl_idx:
        for i, cl_idx in enumerate(loss_sort):
            cl_x = cl_plot_x[i]
            if cl_idx < 10:
                x_disp = -1.5
            else:
                x_disp = -2.7
            ax_loss.annotate(
                str(cl_idx),
                (cl_x, cl_losses[i]),
                xytext=(x_disp, 3),
                xycoords="data",
                fontsize=4,
                fontweight="bold",
                textcoords="offset points",
                color=self.plot_lolli_color,
            )

    # Handle axes label
    ax_pop.set_xticks([])
    ax_loss.set_xlim(left=edges[0], right=edges[-1])
    ax_loss.set_ylim(bottom=0)
    ax_pop.set_ylim(bottom=0)

    return fig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_clusters(self):\n pass", "def plot_cluster_histogram(clusters, saveAs=None):\n \n cluster_labels = np.unique(clusters)\n k = len(cluster_labels)\n \n heights = [(clusters == l).sum() for l in cluster_labels]\n \n plt.close('all') \n fig, ax = plt.subplots(1,1)\n sns.set_style('white')\n ax.bar(range(1, k+1), heights) \n show_values_on_bars(ax)\n plt.title('n={} clusters'.format(k))\n plt.xlabel('Clusters', labelpad=10)\n plt.ylabel('Number of strains', labelpad=10)\n \n if saveAs is not None:\n plt.savefig(saveAs, dpi=300)\n else:\n plt.show()", "def interactions_plot():\n data = load_data('ints_CC'),load_data('ints_CD')\n fig,ax = plt.subplots()\n plot_mean_std(data_CC,ax,'C-C interactions')\n plot_mean_std(data_CD,ax,'C-D interactions')\n plt.xlabel('cluster size, n')\n plt.legend(loc='best')\n plt.savefig('interactions.pdf')", "def plot_clusters_cuisines(i,cuisine_countries_clusters):\n df=pd.DataFrame(group_by_cluster.iloc[i,:])\n df.reset_index(level=0, inplace=True)\n df.columns=['cuisine','count']\n df=df.sort_values(by='count',ascending=False)\n sns.set(rc={'figure.figsize':(11.7,5.27)})\n sns.barplot(x=\"cuisine\", y='count', data=df)\n plt.xticks(rotation=90)\n plt.title('cluster '+str(i)+ ' count: '+str(Counter(cuisine_countries_clusters)[i]))\n plt.tight_layout()\n plt.show()", "def plotClusters(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n\n strength = np.linspace(0, 0.8, len(self.unique_labels))\n np.random.shuffle(strength)\n colors = [plt.cm.nipy_spectral(each) for each in strength]\n np.random.shuffle(strength)\n colorsB = [plt.cm.nipy_spectral(each) for each in strength]\n\n for k, col, colB in zip(self.unique_labels, colors, colorsB):\n a = 1\n s = 3\n if k == -1:\n # Black used for noise.\n col = [1, 0, 0]\n a = 0.3\n s = 1\n\n class_member_mask = (self.labels == k)\n xy = self.data[class_member_mask]\n if len(xy) > 0:\n ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),\n edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k))", "def plot_loss(self):\n #x = [k for k in range(self.rep)]\n loss = self.min_list[:,0]//100 #For clarity\n #plt.plot(x,self.min_list[:,0])\n plt.hist(loss,density=True)\n plt.xlabel(self.list_name + '_loss//100')\n plt.ylabel('Frequency')\n #plt.xticks(range(8),[0,250,500,750,1000,1250,1500,1750])\n plt.title('Distribution of '+self.list_name+'_loss ('+str(self.rep)+' iterations)')\n plt.savefig('img/stats/'+self.list_name+'_lossFrequency_'+self.model_name+'.png')\n plt.show()", "def plot_clusters(cluster_1, cluster_2):\r\n plt.figure(figsize=(14, 7))\r\n plt.bar([i - 0.1 for i in cluster_1.keys()], cluster_1.values(), width=0.2, align='center', color='b',\r\n label='German Population')\r\n plt.bar([i + 0.1 for i in cluster_2.keys()], cluster_2.values(), width=0.2, align='center', color='g',\r\n label='Customer Population')\r\n plt.title('German Population versus Customers')\r\n plt.xlabel('Cluster No.')\r\n plt.ylabel('Cluster %')\r\n plt.xticks(range(1, len(cluster_1) + 1))\r\n plt.legend()\r\n plt.savefig('cluster_map.png')\r\n plt.show()\r\n\r\n return", "def plot_lcga(betas, time, data, degree, clusters_pred, title=None, varname=None):\n # we'll take advantage of the similarity between our problem and to plot 
a GCM estimation with classes\n # and have a very similar function, differing on the data format\n N,T = data.shape\n assert T == len(time)\n assert clusters_pred.shape in [(N,),(N,1)]\n if clusters_pred.shape == (N,1):\n clusters_pred = clusters_pred.flatten() \n if not np.issubdtype(clusters_pred.dtype, np.integer):\n clusterspred_int = clusters_pred.astype(int)\n assert np.all(clusterspred_int == clusters_pred), 'clusters_pred entries in categorical form should belong to some np.integer dtype'\n clusters_pred = clusterspred_int\n n_clusters = max(clusters_pred)+1\n assert len(betas) == n_clusters\n colors = {0:'tab:blue', 1:'tab:orange', 2:'tab:green', 3:'tab:red', 4:'tab:purple',\n 5:'tab:brown', 6:'tab:pink', 7:'tab:gray', 8:'tab:olive', 9:'tab:cyan'}\n plt.figure()\n # plot individual, observed curves\n for i in range(N):\n # do not plot people that belong to groups that are not present in groups2plot:\n # if tuple(groups[i]) not in groups2plot:\n # continue\n color = colors[clusters_pred[i]]\n plt.plot(time, data[i], color=color, linestyle='dotted', linewidth=1)\n # plot population-level curves\n interval = np.linspace(time[0],time[-1], 100)\n for counter in range(n_clusters):\n curve = np.zeros(100)\n coeffs = np.copy(betas[counter])\n for i in range(degree+1):\n curve += coeffs[i] * interval**i\n plt.plot(interval, curve, color=colors[counter], linewidth=5)\n # legends\n legend = ['group '+str(x) for x in range(n_clusters)]\n handles = [Line2D([0],[0],color=colors[i]) for i in range(n_clusters)]\n plt.legend(handles, legend)\n plt.xlabel(\"time steps\")\n varname = 'y' if varname is None else varname\n plt.ylabel(varname)\n if title:\n plt.title(title)\n plt.show()", "def display_clusters(assign):\n for c in assign:\n plt.plot(c[0], c[1], \"r*\")\n plt.plot(\n [p[0] for p in assign[c]],\n [p[1] for p in assign[c]],\n \"o\"\n )\n plt.show()\n plt.close()", "def plot_cost_vs_clusters(cost,num_clusters,filename):\n fig, ax = plt.subplots()\n ax.plot(num_clusters,cost)\n ax.grid()\n ax.set_xlabel(\"Number of clusters\")\n ax.set_ylabel(\"Cost of dropping off TAs\")\n fig.savefig(filename)\n plt.close()", "def show_plot(self):\n label_1 = (self.own_name_1 + \"'s account\")\n label_2 = (self.own_name_2 + \"'s account\")\n clusters = 3\n counts_1 = (self.op_full_name_count_1, self.op_first_name_count_1, self.op_last_name_count_1)\n counts_2 = (self.op_full_name_count_2, self.op_first_name_count_2, self.op_last_name_count_2)\n fig, ax = plt.subplots()\n index = np.arange(clusters)\n bar_width = 0.2\n opacity = 0.5\n rects1 = plt.bar(index, counts_1, bar_width, alpha=opacity, color=\"b\", label=label_1)\n rects2 = plt.bar(index + bar_width, counts_2, bar_width, alpha=opacity, color=\"g\", label=label_2)\n #plt.xlabel(\"Name forms\")\n plt.ylabel(\"Number of references\")\n plt.title(\"Reference of opponents name\")\n plt.xticks(index + bar_width, (\"Opponent's Full Name\", \"Opponent's First Name only\", \"Opponent's Last name only\"))\n plt.legend()\n plt.tight_layout()\n plt.show()", "def plot_clusters(self):\n w = self.w\n h = self.h\n largest_cluster, largest_cluster_size = self.get_largest_cluster()\n\n # Compute space step\n dx = 1. 
/ max(w, h)\n\n # Create figure\n ax = self.create_figure()\n\n for i in range(w + 1):\n for j in range(h + 1):\n if self.cluster[i, j] == largest_cluster:\n color = self.largest_cluster_color\n else:\n color = self.other_clusters_color\n # Plot horizontal edge\n if i <= w - 1 and self.sample[i, j, 0] == 1:\n ax.plot([i * dx, (i + 1) * dx], [j * dx, j * dx],\n color=color)\n # Plot vertical edge\n if j <= h - 1 and self.sample[i, j, 1] == 1:\n ax.plot([i * dx, i * dx], [j * dx, (j + 1) * dx],\n color=color)\n\n self.set_title(ax)\n self.set_legend(ax, largest_cluster_size)", "def cluster_plot(self):\r\n train = StandardScaler().fit_transform(self.X)\r\n pca = PCA(n_components=3)\r\n pca_component = pca.fit_transform(self.X)\r\n fig = plt.figure(figsize=(10,8))\r\n sns.set_palette(sns.color_palette(\"cubehelix\", 8))\r\n ax = Axes3D(fig)\r\n ax.scatter(pca_component[:,0].tolist(),pca_component[:,1].tolist(),pca_component[:,2].tolist(),c=self.labels,marker='v')\r\n ax.legend()\r\n plt.show()", "def plt_gm_clusters(df_all, model):\n\n # color_iter = itertools.cycle([cmap(i) for i in range(cmap.N)])\n\n color_iter = itertools.cycle([cmap(i) for i in range(clus_params['n_components'])])\n\n df = df_all[featureSet_dic[clus_params['feat_list']]].copy()\n\n XX = df.values\n Y_ = model.predict(XX) # predict labels for each model\n\n plt.figure(figsize=(8, 6))\n splot = plt.subplot(1, 1, 1)\n\n for i, (mean, cov, color) in enumerate(zip(model.means_, model.covariances_, color_iter)):\n\n if \"MEAN\" in clus_params['feat_list']:\n v, w = linalg.eigh(cov)\n else:\n\n subset = [0, 5] # mean torque L & R\n v, w = linalg.eigh(cov[np.ix_(subset, subset)])\n mean = np.array([mean[0], mean[5]])\n\n if not np.any(Y_ == i):\n continue\n\n if \"MEAN\" in clus_params['feat_list']:\n plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 1], color=color, s=60)\n else:\n plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 5], color=color, s=60)\n\n # Plot an ellipse to show the Gaussian component\n angle = np.arctan2(w[0][1], w[0][0])\n angle = 180. * angle / np.pi # convert to degrees\n v = 2. * np.sqrt(2.) * np.sqrt(v)\n ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. 
+ angle, color=color)\n ell.set_clip_box(splot.bbox)\n ell.set_alpha(.5)\n splot.add_artist(ell)\n\n plt.xticks(())\n plt.yticks(())\n\n plt.title('Subject: {}, feature set: {}'.format(USER, clus_params['feat_list']))\n plt.subplots_adjust(hspace=.35, bottom=.02)\n plt.show()", "def PlotHist(self, label=None):\n ys, xs, patches = plt.hist(self.test_stats)\n plt.vlines(self.actual, 0, max(ys), linewidth=3, color='black')\n plt.xlabel('test statistic')\n plt.ylabel('count')\n plt.show()", "def plot_loss(G_losses, D_losses):\n plt.figure(figsize=(10,5))\n plt.title(\"Generator and Discriminator Loss During Training\")\n plt.plot(G_losses,label=\"G\")\n plt.plot(D_losses,label=\"D\")\n plt.xlabel(\"iterations\")\n plt.ylabel(\"Loss\")\n plt.legend()\n plt.show()", "def plot_class_distribution(data):\n classes = [r[0] for r in data]\n plt.hist(classes)\n plt.xlabel('Labels')\n plt.ylabel('Counts')\n plt.title('Histogram of class counts')\n plt.show()", "def setup_plot(self):\n\n # Get all the healthy, immune, infected, and dead people seperately \n healthy_x = self.putil.population.get_all_healthy()[:, index.x_axis]\n healthy_y = self.putil.population.get_all_healthy()[:, index.y_axis]\n infected_x = self.putil.population.get_all_infected()[:, index.x_axis]\n infected_y = self.putil.population.get_all_infected()[:, index.y_axis]\n immune_x = self.putil.population.get_all_recovered()[:, index.x_axis]\n immune_y = self.putil.population.get_all_recovered()[:, index.y_axis]\n dead_x = self.putil.population.get_all_dead()[:, index.x_axis]\n dead_y = self.putil.population.get_all_dead()[:, index.y_axis]\n total_infected = self.putil.size - len(healthy_x)\n total_hospitalized = len(self.putil.persons[self.putil.persons[:,index.hospitalized] == 3])\n \n # Current healthcare status\n self.healthcare_status = \"Normal\"\n \n # Scatter plots to plot people\n self.scat = self.ax.scatter(healthy_x,\n healthy_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"lightsteelblue\", s=10)\n self.scat2 = self.ax.scatter(infected_x,\n infected_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"indianred\", s=10)\n self.scat3 = self.ax.scatter(immune_x,\n immune_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"mediumseagreen\", s=10)\n self.scat4 = self.ax.scatter(dead_x,\n dead_y, vmin=0, vmax=1,\n cmap=\"jet\", c=\"indigo\", s=10)\n # Lists for line graph\n self.infected = []\n self.infected_total = []\n self.deaths = []\n self.frames = []\n self.immunes = []\n self.infected.append(len(infected_x))\n self.deaths.append(len(dead_x))\n self.infected_total.append(self.putil.size - len(healthy_x))\n self.immunes.append(len(immune_x))\n self.frames.append(0)\n\n # Line graph plotting number\n self.total_infected, = self.ax1.plot(self.frames, self.infected_total)\n self.currently_infected, = self.ax1.plot(self.frames, self.infected, c=\"indianred\", label='Currently Infected')\n self.total_deaths, = self.ax1.plot(self.frames, self.deaths, c=\"indigo\", label='Total Dead')\n self.total_immune, = self.ax1.plot(self.frames, self.immunes, c=\"mediumseagreen\", label='Total Immune')\n\n # Code below prints statistics \n if(self.putil.enforce_social_distance_at > 0):\n self.ax1.plot([self.putil.enforce_social_distance_at]*2, [0,self.putil.size],c=\"gold\", label=\"Social Distancing\")\n self.social_distancing_info = (\"At frame \" + str(self.putil.enforce_social_distance_at))\n self.social_distancing_num = str(int(self.putil.social_distance_per * self.putil.size)) + \" or \" + str(self.putil.social_distance_per*100)+\"%\"\n else:\n self.social_distancing_info 
= (\"Disabled\")\n self.social_distancing_num = \"0 or 0%\"\n\n if(self.putil.enforce_mask_wearing_at > 0):\n self.ax1.plot([self.putil.enforce_mask_wearing_at]*2, [0,self.putil.size],c=\"hotpink\", label=\"Mask Mandate\")\n self.mask_wearing_info = \"At frame \" + str(self.putil.enforce_mask_wearing_at) \n else:\n self.mask_wearing_info = \"Disabled\"\n\n self.ax1.tick_params(axis=\"y\",direction=\"in\", pad=3)\n self.ax1.plot([0,1000],[self.putil.virus.total_healthcare_capacity]*2, c=\"silver\")\n self.ax1.get_xaxis().set_visible(False)\n self.ax1.legend(prop={'size': 8},loc='upper right')\n self.ax2.text(0,1,\"Statistics\", fontsize='large' , fontweight='bold')\n self.ax2.text(0,-0.5, \"Frame:\\nCurrently Infected:\\nHealthy People:\\nImmune People:\\nTotal Deaths:\\nHealthcare Conditions:\")\n self.ax2.text(0.54,-0.5, \"Population:\\nMasks Wearing:\\nSocial Distancing:\\nPeople Distancing:\\nTotal Infected:\\n\")\n self.ax.text(0,1.06, \"Simulation\", fontsize='xx-large' , fontweight='bold')\n self.text = self.ax2.text(0.33, -0.5, \"%i \\n%i \\n%s \\n%s \\n%s \\n%s\" %(0,len(infected_x),str(len(healthy_x)) + \" or 0%\", str(len(immune_x)) + \" or 0%\",str(len(dead_x)) + \" or 0%\",self.healthcare_status))\n self.text2 = self.ax2.text(0.81,-0.5,\"%d \\n%s \\n%s \\n%s \\n%s\\n\" % (self.putil.size, self.mask_wearing_info, self.social_distancing_info, self.social_distancing_num , total_infected))\n\n return self.scat, self.scat2, self.scat3, self.scat4, self.currently_infected, self.total_infected,", "def plot_umap_clusters(ax, df):\n labels = cluster_labels(df['cluster_id'])\n color_map = get_cluster_color_map(df['cluster_id'].values)\n\n if -1 in labels:\n df_noise = df[df['cluster_id'] < 0]\n ax.scatter(\n df_noise['umap1'].values,\n df_noise['umap2'].values,\n color=color_map[-1],\n s=2,\n label=labels[-1],\n )\n\n text_labels = []\n for cluster_id, cluster_df in df[df['cluster_id'] >= 0].groupby('cluster_id'):\n ax.scatter(\n cluster_df['umap1'].values,\n cluster_df['umap2'].values,\n color=color_map[cluster_id],\n s=2,\n label=labels[int(cluster_id)],\n )\n\n label_pos = df.groupby('cluster_id').mean()\n text_labels = [\n ax.text(label_pos.at[c, 'umap1'], label_pos.at[c, 'umap2'], c)\n for c in list(labels.keys()) if c >= 0\n ]\n adjust_text(\n text_labels, ax=ax,\n force_points=(0.1, 0.1)\n )\n\n ax.legend(\n frameon=False, markerscale=5,\n scatterpoints=1, bbox_to_anchor=(0.96, 0.85))\n ax.set_xlabel('Comp. 1')\n ax.set_ylabel('Comp. 2')\n seaborn.despine(ax=ax, offset=0, trim=True)", "def plot_cluster_rd_space(n_cluster, algo = 'KMeans', pca = True):\n df = apps_df.copy()\n \n \n # optionally set downloads to log. Does not change much though.\n #df = df[df['downloads'] <= 2000000]\n mean_rating = df['rating'].mean()\n mean_downloads = math.log(df[df[algo] >= 0]['downloads'].mean())\n cluster_ratings = [df[df[str(algo)] == i]['rating'].mean() for i in range(n_cluster)]\n cluster_downloads = [math.log(df[df[algo] == i]['downloads'].mean()) for i in range(n_cluster)]\n \n font = {'size' : 16}\n plt.rc('font', **font)\n sns.set_style(\"whitegrid\")\n plt.figure(figsize=(8,8))\n plt.scatter(cluster_downloads, cluster_ratings, marker = '.', color = 'black', s=150)\n plt.xlim(min(cluster_downloads), max(cluster_downloads))\n plt.ylim(min(cluster_ratings)-0.01,max(cluster_ratings)+0.01)\n plt.axvline(x=mean_downloads, ymin=0, ymax=max(cluster_ratings), linestyle = '--')\n plt.axhline(y=mean_rating, xmin=0, xmax=max(cluster_downloads), linestyle = '--')\n plt.xlabel('avg. 
downloads (log scale)', fontsize=16)\n plt.ylabel('avg. ratings', fontsize=16)\n for i in range(n_cluster):\n plt.annotate(str(i), \n (cluster_downloads[i],cluster_ratings[i]))\n plt.axhspan(ymin=min(cluster_ratings)-0.01, ymax=mean_rating, facecolor='c', alpha=0.5)\n plt.axhspan(mean_rating, max(cluster_ratings)+0.01, facecolor='r', alpha=0.5)\n plt.axvspan(min(cluster_downloads), mean_downloads, facecolor='grey', alpha=0.3)\n plt.axvspan(mean_downloads, max(cluster_downloads), facecolor='w', alpha=0.5)\n plt.tick_params(axis='x', labelsize=16)\n plt.tick_params(axis='y', labelsize=16)\n #if pca: \n # plt.title('{} PCA Clusters in Download / Rating Space'.format(algo))\n #else: \n # plt.title('{} NON_PCA Clusters in Download / Rating Space'.format(algo))\n\n plt.show()", "def plot_cluster_data(\n data,\n in_p_x,\n out_p_x,\n in_p_y,\n out_p_y,\n reduce_name,\n logx,\n logy,\n width,\n height,\n font_scale,\n title,\n save_path,\n latex,\n save,\n dot_size,\n):\n sns.set(rc={\"figure.figsize\": (width, height), \"text.usetex\": latex})\n fig = Figure()\n # pylint: disable=no-member\n ax = fig.subplots()\n x, x_label = parse_axis_name(in_p_x, out_p_x, reduce_name)\n y, y_label = parse_axis_name(in_p_y, out_p_y, reduce_name)\n histogram = (in_p_x == in_p_y and out_p_x == out_p_y) or (\n in_p_x == in_p_y and \"/\" not in x\n )\n if histogram:\n palette = \"tab10\"\n if len(set(data[\"cluster\"])) > 10:\n palette = \"tab20\"\n sns.histplot(\n data=data,\n x=x,\n hue=\"cluster\",\n palette=palette,\n multiple=\"stack\",\n bins=100,\n log_scale=(logx, logy),\n ax=ax,\n )\n else:\n sns.scatterplot(\n data=data,\n x=x,\n y=y,\n hue=\"cluster\",\n palette=\"tab10\",\n s=dot_size,\n ax=ax,\n )\n if logx and not histogram:\n if np.nanmin(data[x]) < 0:\n linthresh = np.nanmin(np.abs(data[x].where(data[x] != 0)))\n ax.set_xscale(\"symlog\", linthresh=linthresh)\n else:\n ax.set_xscale(\"log\")\n if logy and not histogram:\n if np.nanmin(data[y]) < 0:\n linthresh = np.nanmin(np.abs(data[y].where(data[y] != 0)))\n ax.set_yscale(\"symlog\", linthresh=linthresh)\n else:\n ax.set_yscale(\"log\")\n ax.tick_params(\n axis=\"both\",\n which=\"major\",\n labelsize=int(10 * font_scale),\n )\n _ = ax.set_title(title, fontsize=int(12 * font_scale))\n ax.set_xlabel(x_label, fontsize=int(11 * font_scale))\n ax.set_ylabel(y_label, fontsize=int(11 * font_scale))\n legend = ax.get_legend()\n legend.set_title(\"cluster\", prop={\"size\": int(11 * font_scale)})\n plt.setp(legend.get_texts(), fontsize=int(10 * font_scale))\n ax.yaxis.get_offset_text().set_fontsize(int(11 * font_scale))\n ax.xaxis.get_offset_text().set_fontsize(int(11 * font_scale))\n # You may use the following line to remove the offset label if needed.\n # ax.xaxis.get_offset_text().set(alpha=0)\n if save:\n try:\n save_name = get_save_name(save_path)\n ax.figure.savefig(save_name, bbox_inches=\"tight\", dpi=300)\n except IOError:\n save_path = f\"Could not save to {save_path}. 
Did you forget the filetype?\"\n save_path = None\n save = False\n return fig", "def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)", "def plot_directed(glomnums):\n odor_corrs_means = []\n odor_corrs_SDs = []\n air_corrs_means = []\n air_corrs_SDs = []\n corrs_deltafrate = []\n fig = figure()\n for gni,glomnum in enumerate(glomnums):\n print \"Computing phasic and deltafrate correlations for # of gloms =\",glomnum\n ## Set graph=True below to plot neg corr-ed responses too.\n corr_deltafrate, odor_corrs, air_corrs, overall_odor_mean, overall_air_mean = \\\n plot_decorrs_special([glomnum],graph=True)\n ax = fig.add_subplot(len(glomnums),1,gni+1)\n #hist(air_corrs,20,range=(-1.0,1.0),normed=True,histtype='step',\\\n # color='b',linewidth=2,label='air %2.1f'%overall_air_mean+'Hz')\n hist(odor_corrs,20,range=(-1.0,1.0),normed=True,histtype='step',\\\n color='r',linewidth=2,label='odor %2.1f'%overall_odor_mean+'Hz')\n ax.set_xticks([])\n #ax.set_xticklabels(['0.75','1.25'])\n ## just to scale up the ticks fontsize.\n axes_labels(ax,'','',adjustpos=False,fontsize=34)\n\n corrs_deltafrate.append(corr_deltafrate)\n ## mean and SD of phasic correlations of odor and air\n odor_corrs_means.append(mean(odor_corrs))\n odor_corrs_SDs.append(std(odor_corrs))\n air_corrs_means.append(mean(air_corrs))\n air_corrs_SDs.append(std(air_corrs))\n\n ax.set_yticks([])\n #biglegend(legendlocation='upper left')\n if gni == len(glomnums)-1:\n ax.set_xticks([-1.0,0.0,1.0])\n ax.set_xticklabels(['-1','0','1'])\n axes_labels(ax,'phase correlation','',adjustpos=False,fontsize=30)\n plt.tight_layout()\n\n ## mean phase corr vs number of connected gloms\n fig=figure()\n ax=fig.add_subplot(111)\n #plot(glomnums,air_corrs_means,color='b',linewidth=2,label='air')\n plot(glomnums,odor_corrs_means,color='r',linewidth=2,label='odor')\n ax.set_xticks(glomnums)\n ax.set_xticklabels([str(glomnum) for glomnum in glomnums])\n axes_labels(ax,'# of connected glomeruli','phase correlation mean',\\\n adjustpos=False,fontsize=30)\n #biglegend(legendlocation='lower left')\n plt.tight_layout()\n ## spread of phase corr vs number of connected gloms\n fig=figure()\n ax=fig.add_subplot(111)\n #errorbar(glomnums,air_corrs_SDs,color='b',linewidth=2,label='air')\n errorbar(glomnums,odor_corrs_SDs,color='r',linewidth=2,label='odor')\n ax.set_xticks(glomnums)\n ax.set_xticklabels([str(glomnum) for glomnum in glomnums])\n axes_labels(ax,'# of connected glomeruli','phase correlation spread',\\\n adjustpos=False,fontsize=30)\n #biglegend(legendlocation='upper left')\n plt.tight_layout()\n ## delta frate corr vs number of connected gloms\n fig=figure()\n ax=fig.add_subplot(111)\n plot(glomnums,corrs_deltafrate,color='b',linewidth=2)\n ax.set_xticks(glomnums)\n ax.set_xticklabels([str(glomnum) for glomnum in glomnums])\n axes_labels(ax,'# of connected glomeruli','$\\Delta$frate correlation',\\\n adjustpos=False,fontsize=30)\n tight_layout()", "def plot_loss(self):\n train_elbo_range = range(len(self.train_elbo_hist))\n val_elbo_range = range(len(self.val_elbo_hist))\n train_loss_range = range(len(self.train_loss_hist))\n val_loss_range = range(len(self.val_loss_hist))\n\n fig, ax = plt.subplots(2, 2)\n ax[0][0].plot(train_elbo_range, self.train_elbo_hist)\n ax[0][0].title.set_text(\"Train ELBO\")\n ax[0][1].plot(val_elbo_range, self.val_elbo_hist)\n ax[0][1].title.set_text(\"Val ELBO\")\n ax[1][0].plot(train_loss_range, 
self.train_loss_hist)\n ax[1][0].title.set_text(\"Train MSE\")\n ax[1][1].plot(val_loss_range, self.val_loss_hist)\n ax[1][1].title.set_text(\"Val MSE\")\n plt.tight_layout()\n plt.show()", "def plotDistributionWithGeneHistogram(lXs, lYs, lZs, lZOthers,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\", yax2=\"yax2\"):\n\n fig = plt.Figure(figsize=(20,20))\n fig.suptitle(title, fontsize=32)\n gs = gridspec.GridSpec(2, 1, width_ratios=[1],height_ratios=[1, 3], hspace=0.1) \n ax1 = fig.add_subplot(gs[0])\n ax1.plot(lXs,lZOthers)\n lZmin = [0] * len(lZs)\n ax1.vlines(lXs,lZmin,lZOthers, colors='grey', alpha=0.15)\n if max(lZOthers) <= 0:\n ax1.set_ylim(0,1)\n ax1.set_xlim(lXs[0],lXs[-1])\n lZmax = lZs\n lZmin2 = [300] * len(lZs)\n ax2 = fig.add_subplot(gs[1])\n ax2.vlines(lXs,lZmin,lZmax, colors='grey', alpha=0.15)\n ax3 = ax2.twinx()\n ax3.plot(lXs,lYs)\n ax2.set_xlim(lXs[0],lXs[-1])\n ax2.set_ylim(0,max(lZs)+int(max(lZs)*0.05))\n #ax3.set_ylim(min(lYs)-1,max(lYs)+1)\n axis_font = {'size':'28'}\n ax2.set_xlabel(xax, **axis_font)\n ax3.set_ylabel(yax2, **axis_font)\n ax2.set_ylabel(yax, **axis_font)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)", "def plotLoss():\n # ssr\n ssr = np.log(gradientDescent(X, y)[1])\n # number of iterations \n iterations = np.log(np.arange(1, len(ssr) + 1, 1))\n # plot reduction of ssr\n plt.plot(iterations, ssr)\n # xlabel\n plt.xlabel(\"Iteration\")\n # ylabel\n plt.ylabel(\"SSR\")\n # title\n plt.title(\"Reduction of SSR by number of Iterations\")\n # show plot \n plt.show()", "def showVs(df, feat1, feat2):\n colors = ['blue', 'red', 'green', 'coral']\n for u in range(len(cBouts)):\n plt.plot(f[f['clust_ind'] == u][feat1],\n f[f['clust_ind'] == u][feat2], 'o', color=colors[u],\n alpha=0.6, markeredgecolor='none')\n plt.xlabel(feat1)\n plt.ylabel(feat2)\n plt.show()\n return", "def hist(self, color=\"#FFFFFF\", axes_style=\"darkgrid\", context=\"notebook\",\n col_wrap=4, exhibit_path=None, **kwargs):\n import matplotlib.pyplot as plt\n import seaborn as sns\n sns.set_context(context)\n\n # data0 = self.sims_data[[\"sim\", \"origin\", \"dev\", \"rectype\", \"latest\", \"reserve\",]]\n # data0 = data0[(data0[\"dev\"]==data0[\"dev\"].max()) & (data0[\"rectype\"]==\"forecast\")].reset_index(drop=True)\n # data0 = data0.drop([\"dev\", \"rectype\", \"latest\"], axis=1)\n #\n # # Include additional origin representing aggregate distribution.\n # data1 = data0.groupby(\"sim\", as_index=False)[[\"reserve\"]].sum()\n # data1[\"origin\"] =\"total\"\n # data = pd.concat([data0, data1])\n data = self.reserve_dist\n\n # Get mean, min and max ultimate and reserve by origin.\n med_data = data.groupby(\"origin\", as_index=False)[[\"reserve\"]].median().rename(\n {\"reserve\": \"med_res\"}, axis=1).set_index(\"origin\")\n min_data = data.groupby(\"origin\", as_index=False)[[\"reserve\"]].min().rename(\n {\"reserve\": \"min_res\"}, axis=1).set_index(\"origin\")\n max_data = data.groupby(\"origin\", as_index=False)[[\"reserve\"]].max().rename(\n {\"reserve\": \"max_res\"}, axis=1).set_index(\"origin\")\n dfmetrics = functools.reduce(lambda df1, df2: df1.join(df2), (med_data, min_data, max_data))\n dfmetrics = dfmetrics.applymap(lambda v: 0 if v < 0 else v).reset_index(drop=False)\n\n with sns.axes_style(axes_style):\n\n pltkwargs = {\"color\": color, \"bins\": 20, \"edgecolor\": \"#484848\",\n \"alpha\": 1., \"linewidth\": .45}\n\n if kwargs is not None:\n pltkwargs.update(kwargs)\n\n grid = sns.FacetGrid(\n data, col=\"origin\", col_wrap=col_wrap, 
margin_titles=False,\n despine=True, sharex=False, sharey=False,\n )\n\n hists = grid.map(plt.hist, \"reserve\", **pltkwargs)\n grid.set_axis_labels(\"\", \"\")\n grid.set_titles(\"\", size=6)\n\n # Change ticklabel font size and place legend on each facet.\n origin_vals = sorted([int(ii) for ii in data[\"origin\"].unique() if ii != \"total\"])\n dindex = {jj: ii for ii, jj in enumerate(origin_vals)}\n dindex.update({\"total\": max(dindex.values()) + 1})\n data[\"origin_index\"] = data[\"origin\"].map(dindex)\n origin_order = data[[\"origin_index\", \"origin\"]].drop_duplicates().sort_values(\n \"origin_index\"\n ).origin.values\n\n with warnings.catch_warnings():\n\n warnings.simplefilter(\"ignore\")\n\n for origin, ax_ii in zip(origin_order, grid.axes):\n\n # xmin = np.max([0, dfmetrics[dfmetrics.origin == origin][\"min_res\"].item()])\n xmax = dfmetrics[dfmetrics.origin == origin][\"max_res\"].item() * 1.025\n xmed = dfmetrics[dfmetrics.origin == origin][\"med_res\"].item()\n origin_str = \"{}\".format(origin)\n ax_ii.set_xlim([0, xmax])\n ax_ii.axvline(xmed, color=\"#E02C70\", linestyle=\"--\", linewidth=1.5)\n ax_ii.grid(False)\n\n ymedloc = max(rect.get_height() for rect in ax_ii.patches) * .30\n ax_ii.set_yticks([])\n ax_ii.set_yticklabels([])\n ax_ii.tick_params(\n axis=\"x\", which=\"both\", bottom=True, top=False, labelbottom=True\n )\n ax_ii.set_xticklabels(\n [\"{:,.0f}\".format(jj) for jj in ax_ii.get_xticks()], size=7\n )\n ax_ii.annotate(\n origin_str, xy=(.85, .925), xycoords='axes fraction',\n textcoords='axes fraction', fontsize=9, rotation=0, color=\"#000000\",\n )\n ax_ii.annotate(\n \"median = {:,.0f}\".format(xmed), (xmed, ymedloc), xytext=(7.5, 0),\n textcoords=\"offset points\", ha=\"center\", va=\"bottom\", fontsize=7,\n rotation=90, color=\"#000000\"\n )\n\n # Draw border around each facet.\n for _, spine in ax_ii.spines.items():\n spine.set(visible=True, color=\"#000000\", linewidth=.50)\n\n if exhibit_path is not None:\n plt.savefig(exhibit_path)\n else:\n plt.show()", "def plot(self, p: int):\n self.compute_clusters(p)\n self.plot_clusters()", "def plot_variation_distn(gene_vars: pd.DataFrame):\n plt.hist(gene_vars.median(axis=1), bins=100, alpha=0.4, label='median')\n plt.hist(gene_vars.mean(axis=1), bins=100, alpha=0.4, label='mean')\n plt.legend()" ]
[ "0.6944744", "0.68056774", "0.65191925", "0.6483268", "0.6473939", "0.6376142", "0.63496155", "0.62911314", "0.6242816", "0.6240282", "0.6223415", "0.61980224", "0.615062", "0.61354357", "0.611308", "0.61090106", "0.6094741", "0.60937613", "0.6079056", "0.60751575", "0.6067895", "0.6062142", "0.6053813", "0.6047508", "0.6047491", "0.6020831", "0.60179865", "0.5974925", "0.597366", "0.5969935" ]
0.74593276
0
Archive processor respects the 'template' argument.
def test_args_template(testapp):
    stream = archive.process(
        testapp,
        [holocron.Item({"title": "The Force", "content": "Obi-Wan"})],
        template="foobar.txt",
    )

    assert isinstance(stream, collections.abc.Iterable)
    assert list(stream) == [
        holocron.Item({"title": "The Force", "content": "Obi-Wan"}),
        holocron.WebSiteItem(
            {
                "source": pathlib.Path("archive://index.html"),
                "destination": pathlib.Path("index.html"),
                "template": "foobar.txt",
                "items": [holocron.Item({"title": "The Force", "content": "Obi-Wan"})],
                "baseurl": testapp.metadata["url"],
            }
        ),
    ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default_archiver(random, population, archive, args):\r\n return archive", "def master_archive(f, e):\n template = e.get_template(TEMPLATES['archive'])\n write_file(\"archives.html\", template.render(entries=f))", "def _archive_project(name, buff, files=None, repo=None, branch='master',\n ignore_deleted=False):\n if repo is None:\n repo = Repoman.open_repo(name)\n now = datetime.now().timetuple()[:6]\n archive = zipfile.ZipFile(buff, \"w\", zipfile.ZIP_DEFLATED)\n files_list = files if files is not None else \\\n repo.list_files_for_branch(branch)\n all_files = files_list if files is None else \\\n repo.list_files_for_branch(branch)\n\n template_paths = defaultdict(list)\n for file_path in all_files:\n split_file_path = file_path.split('/')\n if len(split_file_path) > 2:\n template_paths[split_file_path[1]].append(file_path)\n extractors = json.loads(repo.file_contents_for_branch('extractors.json',\n branch) or '{}')\n\n seen_files = set()\n spiders = set()\n for file_path in files_list:\n if file_path.startswith('spiders'):\n try:\n parts = file_path.split(\"/\")\n if len(parts) >= 2:\n spider_name = parts[1]\n if spider_name.endswith('.json'):\n spider_name = spider_name[:-5]\n if spider_name not in spiders:\n # Load spider if necessary\n if len(parts) > 2:\n file_path = 'spiders/' + spider_name + '.json'\n file_contents = repo.file_contents_for_branch(\n file_path, branch)\n as_json = json.loads(file_contents)\n templates = []\n # Load all spider templates\n spider_templates = template_paths.get(spider_name, [])\n for template_path in spider_templates:\n seen_files.add(template_path)\n existing = {}\n # Ignore deleted templates\n try:\n templ_contents = repo.file_contents_for_branch(\n template_path, branch)\n except (TypeError, ValueError):\n continue\n json_template = json.loads(templ_contents)\n # Validate extractors\n template_extractors = json_template.get(\n 'extractors', {})\n for field, eids in template_extractors.items():\n existing[field] = [eid for eid in eids\n if eid in extractors]\n json_template['extractors'] = existing\n spider_name = parts[1]\n templates.append(json_template)\n spiders.add(spider_name)\n as_json.pop('template_names', None)\n as_json['templates'] = templates\n _add_to_archive(archive, file_path,\n json.dumps(as_json), now)\n except TypeError:\n if ignore_deleted:\n continue\n # Handle Deleted Spiders\n file_contents = repo.file_contents_for_branch(file_path,\n 'master')\n file_info = {'deleted': True}\n if file_contents:\n as_json = json.loads(file_contents)\n _add_to_archive(archive, file_path, json.dumps(file_info), now)\n else:\n file_contents = repo.file_contents_for_branch(file_path, branch)\n _add_to_archive(archive, file_path, file_contents, now)\n seen_files.add(file_path)\n\n # Add empty placeholders for missing files required by dash\n for file_path in {'extractors.json', 'items.json'} - seen_files:\n _add_to_archive(archive, file_path, '{}', now)\n archive.close()", "def process_template(template, data):\n t = Template(template, data)\n t.job = get_current_job()\n t.process()\n\n result = dict(template=template, data=data, result_folder=t.resultdir, log=t.log)\n\n return result", "def _archive(self):\n # LOG: change this to something archive specific\n self.set_property('processing_type', 'archive')\n self.should_copy = False\n self.is_recursive = True", "def archive_by_key(\n self,\n __template_id,\n __key,\n *,\n workflow_id=None,\n command_id=None,\n read_as=None,\n act_as=None,\n ):\n raise NotImplementedError", "def 
test_archive_run(self):\n pass", "def baseTemplate(*args, exists: bool=True, fileName: Union[AnyStr, bool]=\"\", force: bool=True,\n load: bool=True, matchFile: Union[AnyStr, bool]=\"\", silent: bool=True, unload:\n bool=True, viewList: Union[AnyStr, bool]=\"\", q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass", "def process_archive(self, file):\n self.recursive_archive_depth += 1\n # LOG: write_log or somehow log the archive file here\n if self.recursive_archive_depth >= self.max_recursive_depth:\n file.make_dangerous('Archive bomb')\n else:\n tempdir_path = file.make_tempdir()\n # TODO: double check we are properly escaping file.src_path\n # otherwise we are running unvalidated user input directly in the shell\n command_str = '{} -p1 x \"{}\" -o\"{}\" -bd -aoa'\n unpack_command = command_str.format(SEVENZ_PATH,\n file.src_path, tempdir_path)\n self._run_process(unpack_command)\n self.process_dir(tempdir_path, file.dst_path)\n self.safe_rmtree(tempdir_path)\n self.recursive_archive_depth -= 1", "def __get_packed_xwalk_app_template(self, dest_dir):\n input_file = urllib2.urlopen(self.updated_url)\n contents = input_file.read()\n input_file.close()\n file_path = os.path.join(dest_dir, self.file_name)\n if os.path.isfile(file_path):\n os.remove(file_path)\n file_dir = dest_dir + '/' + self.file_name.split('.tar.gz')[0]\n if os.path.exists(file_dir):\n shutil.rmtree(file_dir)\n output_file = open(file_path, 'w')\n output_file.write(contents)\n output_file.close()", "def pack_to(self, dest_file_template):\n fname = dest_file_template % self.manifest_version()\n shutil.make_archive(fname, \"zip\", self._crx_dir)\n return fname", "def tar(self, out=sys.stdout, config=None, *args, **kw):\r\n if config is not None:\r\n config = self.manifest.config_schema.validate(config)\r\n if self.manifest.get(\"templates\"):\r\n templates_dir = self.copy(templates=True)\r\n for template in self.find(templates=True):\r\n EJSTemplate(templates_dir + template).apply(templates_dir + template, config)\r\n tar = tarfile.open(\"\", mode=\"w|\", fileobj=out)\r\n templates = self.manifest.get(\"templates\")\r\n for path in self.find(*args, **kw):\r\n if config and path in templates:\r\n real_path = templates_dir + path\r\n EJSTemplate(real_path).apply(real_path, config)\r\n else:\r\n real_path = self.unchroot_path(path)\r\n tar.add(real_path, path, recursive=False)\r\n tar.close()", "def open(self, *args, **kwargs):\n return ZipFileArchiver(*args,**kwargs)", "def templateargs(self, target_jar, confs=None):\r\n raise NotImplementedError()", "def apply_to(self, template):\n pass", "def prepare_zip(self, filename, *args, **kwargs):\n\n return '/vsizip/' + filename, args, kwargs", "def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())", "def make_zip(self, project):\n return None", "def test_create_namespaced_processed_template(self):\n pass", "def main(cls, **kwargs):\n try:\n import file_transformer\n except Exception as e:\n sys.exit(\"{}\\nSee https://github.com/benkehoe/file-transformer\".format(e))\n \n def loader(input_stream, args):\n return yaml.load(input_stream)\n \n def processor(input, args):\n transform = cls(input, vars(args))\n transform.apply()\n return transform.template\n \n def dumper(output, output_stream, args):\n yaml.dump(output, output_stream)\n \n return file_transformer.main(processor, loader, dumper, **kwargs)", "def deploy(self):\n if not self._ini:\n self._load_template()\n if not self._ini:\n raise RuntimeError('Could not load 
template. __init__.ini missing or damaged.')\n if 'dirs' in self._ini:\n for dirname in self._ini['dirs']:\n comp_makedirs(os.path.join(self._path, dirname), exist_ok=True)\n if 'files' in self._ini:\n conf = ApplicationConf.get_instance()\n for filename in self._ini['files']:\n with comp_open(\n os.path.join(get_conf('DEFAULT_TEMPLATE_PATH'), self._template, filename),\n mode='r'\n ) as fp:\n content = fp.read()\n content = content.format(**conf)\n with comp_open(os.path.join(self._path, filename), mode='w') as wp:\n wp.write(content)\n if 'binaries' in self._ini:\n for filename in self._ini['binaries']:\n shutil.copy2(\n os.path.join(get_conf('DEFAULT_TEMPLATE_PATH'), self._template, filename),\n os.path.join(self._path, filename)\n )", "def archive(self):\n logging.info(_('Creating compressed archive...'))\n\n report_file_ext = 'bz2'\n compressor = 'bzip2'\n caller = Caller({})\n try:\n caller.call('xz --version')\n report_file_ext = 'xz'\n compressor = 'xz'\n except Exception:\n logging.debug('xz compression not available')\n\n if not os.path.exists(self.conf[\"output\"]):\n os.makedirs(self.conf[\"output\"])\n\n self.conf[\"path\"] = os.path.join(\n self.conf[\"output\"],\n \"sosreport-%s-%s.tar.%s\" % (\n 'LogCollector',\n time.strftime(\"%Y%m%d%H%M%S\"),\n report_file_ext\n )\n )\n\n if self.conf[\"ticket_number\"]:\n self.conf[\"path\"] = os.path.join(\n self.conf[\"output\"],\n \"sosreport-%s-%s-%s.tar.%s\" % (\n 'LogCollector',\n self.conf[\"ticket_number\"],\n time.strftime(\"%Y%m%d%H%M%S\"),\n report_file_ext\n )\n )\n\n config = {\n 'report': os.path.splitext(self.conf['path'])[0],\n 'compressed_report': self.conf['path'],\n 'compressor': compressor,\n 'directory': self.conf[\"local_tmp_dir\"],\n 'rname': os.path.basename(self.conf['path']).split('.')[0],\n }\n caller.configuration = config\n shutil.move(\n os.path.join(\n self.conf[\"local_tmp_dir\"],\n 'working'\n ),\n os.path.join(\n self.conf[\"local_tmp_dir\"],\n config[\"rname\"]\n ),\n )\n caller.call(\"tar -cf '%(report)s' -C '%(directory)s' '%(rname)s'\")\n shutil.rmtree(self.conf[\"local_tmp_dir\"])\n caller.call(\"%(compressor)s -1 '%(report)s'\")\n os.chmod(self.conf[\"path\"], stat.S_IRUSR | stat.S_IWUSR)\n sha256_out = caller.call(\"sha256sum '%(compressed_report)s'\")\n checksum = sha256_out.split()[0]\n with open(\"%s.sha256\" % self.conf[\"path\"], 'w') as checksum_file:\n checksum_file.write(sha256_out)\n\n msg = ''\n if os.path.exists(self.conf[\"path\"]):\n archiveSize = float(os.path.getsize(self.conf[\"path\"])) / (1 << 20)\n\n size = '%.1fM' % archiveSize\n\n msg = _(\n 'Log files have been collected and placed in {path}\\n'\n 'The sha256 for this file is {checksum} and its size is {size}'\n ).format(\n path=self.conf[\"path\"],\n size=size,\n checksum=checksum,\n )\n\n if archiveSize >= 1000:\n msg += _(\n '\\nYou can use the following filters -c, -d, -H in the '\n 'next execution to limit the number of Datacenters,\\n'\n 'Clusters or Hosts that are collected in order to '\n 'reduce the archive size.'\n )\n return msg", "def main(\n files: List[Path] = typer.Argument(default=None, dir_okay=False, exists=True),\n template: Optional[str] = typer.Option(\n None, '--template', help='Name of template file'\n ),\n logo: Optional[str] = typer.Option(None, '--logo', help='Name of logo file'),\n logo_width: Optional[str] = typer.Option(\n None, '--logo-width', help='Logo width (default 35mm)'\n ),\n highlight_style: Optional[str] = typer.Option(None, '--highlight-style',\n help='Specify coloring style to be used 
in highlighting source code'),\n syntax_definition: Optional[str] = typer.Option(None, '--syntax-definition',\n help='Specify a directory which contains syntax definition files'),\n no_toc: bool = typer.Option(\n False, '--no-toc', help='table of contents in PDF document'\n ),\n no_number_sections: bool = typer.Option(False, '--no-number-sections', help='no section numbering'),\n\n no_titlepage: bool = typer.Option(False, '--no-titlepage', help='title in PDF document'),\n tex_file: bool = typer.Option(\n False, '--tex', help='create TeX file instead of PDF document'\n ),\n email: Optional[str] = typer.Option(None, '--email', help='Author email'),\n company: Optional[str] = typer.Option(None, '--company', help='Name of company'),\n department: Optional[str] = typer.Option(\n None, '--department', help='Name of department'\n ),\n confidential: bool = typer.Option(\n False, '--confidential', help='indicate confidential'\n ),\n debug: bool = typer.Option(False, '--debug', help='turns debugging on'),\n pdf_engine: str = typer.Option(\n 'xelatex',\n '--pdf-engine',\n help='Specify pdf engine, one of lualatex, xelatex or tectonic ',\n ),\n _version: bool = typer.Option(\n None, '-V', '--version', callback=version_callback, help='Show version and exit'\n ),\n):\n\n if not files:\n typer.echo('Error: Must specify at least one .md file.')\n raise typer.Abort()\n\n mdfiles: List[str] = [str(md) for md in files]\n\n template = template or os.environ.get('MD2PDF_TEMPLATE')\n if template is None:\n print('No template specified')\n sys.exit(1)\n\n email = email or os.environ.get('MD2PDF_AUTHOR_EMAIL')\n footer_center = ''\n\n # command line overwrites `MD2PDF_PDF_ENGINE`. if both are not given\n # then `xelatex` is the default\n pdf_engine = pdf_engine or os.environ.get('MD2PDF_PDF_ENGINE') or 'xelatex'\n # check that pdf-engine is one of the following\n if pdf_engine not in ['xelatex', 'lualatex', 'tectonic']:\n print('--pdf-engine must be one of \"xelatex\", \"lualatex\", \"tectonic\"')\n sys.exit(1)\n\n ext = '.pdf'\n if tex_file:\n ext = '.tex'\n\n if len(mdfiles) == 1:\n toml_file = os.path.splitext(mdfiles[0])[0] + '.toml'\n\n if os.path.exists(toml_file):\n print(f'TOML file {toml_file} found')\n parsed_toml = toml.load(toml_file)\n default_val = parsed_toml.get('default')\n if default_val is None:\n print(f'No file names found in {toml_file}')\n else:\n mdfiles = default_val.get('files')\n\n for mdf in mdfiles:\n print(f'Compiling {mdf}')\n\n main_mdfile = os.path.realpath(mdfiles[0])\n\n outfile = Path(main_mdfile).stem + ext\n\n year = date.today().year\n\n company = company or os.environ.get('MD2PDF_COMPANY')\n department = department or os.environ.get('MD2PDF_DEPARTMENT')\n\n if company:\n if confidential:\n footer_center = f'© Copyright {year} {company}'\n else:\n footer_center = f'{year} {company}'\n\n pdcmd = PandocCmd(outfile)\n pdcmd.append(f'--template={template}')\n pdcmd.append(f'--pdf-engine={pdf_engine}')\n\n pdcmd.set_v('footer-center', footer_center)\n pdcmd.set_v('company', company)\n pdcmd.set_v('department', department)\n\n syntax_definition = syntax_definition or os.environ.get('MD2PDF_SYNTAX_DEFINITION_DIR')\n if syntax_definition is not None:\n add_syntax_definition(pdcmd, syntax_definition)\n\n pdcmd.append('--highlight-style')\n highlight_style = highlight_style or os.environ.get('MD2PDF_HIGHLIGHT_STYLE')\n if highlight_style is None:\n pdcmd.append('pygments')\n else:\n check_highlight_style(highlight_style)\n pdcmd.append(highlight_style)\n\n if not no_number_sections:\n 
pdcmd.append('--number-sections')\n\n if no_titlepage:\n pdcmd.set_m('titlepage', 'false')\n\n logo = logo or os.environ.get('MD2PDF_LOGO')\n pdcmd.set_v('logo', logo)\n\n logo_width = logo_width or os.environ.get('MD2PDF_LOGO_WIDTH')\n pdcmd.set_v('logo-width', logo_width)\n\n pdcmd.set_m('email', email)\n\n if not no_toc:\n pdcmd.append('--toc')\n\n pdcmd.extend(mdfiles)\n\n if debug:\n print(' '.join(pdcmd.pandoc))\n\n\n pdcmd.run()", "def do_pack():\n\n now = datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n now_year, now_month, now_day, now_hour, now_minute, now_second\n )\n # All archives must be stored in the folder versions\n local('mkdir -p versions')\n # execute locally the compression of the folder\n command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n # return the archive path if the archive has been correctly generated\n if command.succeeded:\n return file_name\n else:\n return None", "def test_unarchive_run(self):\n pass", "def __init__(__self__, *,\n galleries: pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateGalleryArgs']]],\n resource_group_name: pulumi.Input[str],\n template_data: Any,\n author: Optional[pulumi.Input[str]] = None,\n localized: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateLocalizedGalleryArgs']]]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"galleries\", galleries)\n pulumi.set(__self__, \"resource_group_name\", resource_group_name)\n pulumi.set(__self__, \"template_data\", template_data)\n if author is not None:\n pulumi.set(__self__, \"author\", author)\n if localized is not None:\n pulumi.set(__self__, \"localized\", localized)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def _ProcessTemplate(self,topdir):\n self.dicomdir = \"%s/anatomicals\" % self.topdir\n self.rawdir = \"%s/raw\" % topdir\n self.rawdirs = {}\n tmplt = self._GetTemplate()\n if self.opts.outdir is not None:\n# Override template output directory.\n tmplt['top_outdir'] = self.opts.outdir\n self.tmplt = tmplt\n if len(tmplt['top_outdir']) == 0:\n tmplt['top_outdir'] = os.path.realpath(self.topdir)\n raise RuntimeError('Template file must specify an output directory.')\n tmplt['top_outdir'] = os.path.realpath(tmplt['top_outdir'])\n if '/home' in tmplt['top_outdir'][:7]:\n raise RuntimeError('Image data cannot be stored in the /home partition. Change the \"top_outdir\" entry in the template file: %s.' % (' '.join(self.templates)))\n# tmplt['subject'] = 'orig'\n self.procdir = os.path.abspath(\"%s/%s\" % \\\n (tmplt['top_outdir'],tmplt['subject']))\n target = os.path.abspath('%s/../..' 
% tmplt['top_outdir'])\n if not ismounted(target):\n raise RuntimeError('Could not access partition at %s' % target)\n\n self.anatdir = \"%s/anat\" % self.procdir\n self.fmapdir = \"%s/%s\" % (self.procdir,tmplt['fmap']['outdir'])\n self.dtidir = \"%s/%s\" % (self.procdir,tmplt['dti']['outdir'])\n self.logdir = \"%s/%s\" % (self.procdir,tmplt['logdir'])\n self.skip = tmplt.get('skip', DEFAULT_SKIP)\n self.acq_tr = tmplt.get('acq_tr',None)\n self.episetup_dir = \"%s/%s\" % (self.procdir,tmplt['first_epi'])\n self.fsl_cmpblty = tmplt.get('fsl_compatibility',False)\n self.epi_file_format = self.tmplt['epi_file_format']\n self.censor_thresh = tmplt.get('censor_threshold', 2.)\n self.censor_interleave = tmplt.get('censor_interleave', True)\n# self.server_userid = self.tmplt.get('server_userid','default')\n\n# Overide flags for aligning EPIs and skull-stripping with command-\n# line options.\n if self.opts.align_fmaps:\n self.align_fmaps = True\n else:\n self.align_fmaps = self.tmplt.get('epi_align', False)\n\n if self.opts.no_align_fmaps:\n self.no_align_fmaps = True\n else:\n self.no_align_fmaps = self.tmplt.get('no_epi_align', False)\n\n if self.opts.skull_strip:\n self.skull_strip = True\n else:\n self.skull_strip = self.tmplt.get('skull_strip', False)\n\n# Create log file now so it can be used immediately.\n if not os.path.exists(self.logdir):\n if self.verbose:\n print 'mkdir %s' % self.logdir\n if not self.opts.fake_opts:\n self.MakeDir(self.logdir)\n\n self._ProcessTemplateEpiInfo()", "def template(name=None, url=None):\n if name is not None:\n filename = '%s.tar.gz' % name\n else:\n filename = os.path.basename(url)\n\n if not is_file(os.path.join('/var/lib/vz/template/cache', filename)):\n openvz.download_template(name, url)", "def process_template(self, component):\n destination = os.path.join(self.pubchem_templates, f\"{component.id}.sdf\")\n downloaded = download_template(destination, component.id, component.inchikey)\n\n if downloaded:\n rescale_molecule(destination, 1.5)\n\n return downloaded", "def prepare_gz(self, filename, *args, **kwargs):\n\n return '/vsigzip/' + filename, args, kwargs" ]
[ "0.59726363", "0.5919844", "0.5840984", "0.57774436", "0.57668006", "0.57397795", "0.560909", "0.55987394", "0.558662", "0.5572569", "0.5498331", "0.54975486", "0.5460308", "0.54522187", "0.54182094", "0.5418069", "0.54033256", "0.53956103", "0.53526306", "0.5346858", "0.53449756", "0.532618", "0.52976906", "0.52841944", "0.526089", "0.52334064", "0.5224961", "0.5220392", "0.5217755", "0.5216062" ]
0.64534384
0
Get the function url for HTTP call
def get_function_url(is_async, base_url, function_name): function_url = None if is_async is True: function_url = "{}/async-function/{}".format(base_url, function_name) else: function_url = "{}/function/{}".format(base_url, function_name) logging.debug("Get Function URL. Is Async: {}, Function URL: {}".format(is_async, function_url)) return function_url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def url():\n ...", "def Url(self) -> str:", "def gen_query_url(self, url, function, format=None, method=None, get_args=None):\n function = self.namespace_map[function]\n return '%s/%s' % (url, function)", "def get_url(self, *args, **kwargs):\n raise NotImplementedError", "def url(self):\n url = os.environ.get('PATH_INFO')\\\n or os.environ.get('REQUEST_URI')\n return url if url else ''", "def _create_request_url():\n url = 'http'\n if _config['save']:\n url += 's'\n url += '://{}:{}/move'.format(_config['ip'], _config['port'])\n return url", "def url(self, request_path=\"\"):\n return f\"{self.scheme}://{self.host}/{request_path}\"", "def get_url():\n if os.environ['SERVER_PORT'] == '80':\n scheme = 'http://'\n else:\n scheme = 'https://'\n host = os.environ['SERVER_NAME']\n script_name = urllib.quote(os.environ.get('SCRIPT_NAME', ''))\n path_info = urllib.quote(os.environ.get('PATH_INFO', ''))\n qs = os.environ.get('QUERY_STRING', '')\n if qs:\n qs = '?' + qs\n return scheme + host + script_name + path_info + qs", "def GetURL(self, rel_url):\n return 'http://localhost:%d/%s' % (self.port, rel_url)", "def urlpath( request, *args, **kwargs ):", "def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)", "def url(self) -> str:\n return self.HTTP.url if self.HTTP else self._url", "def url(vmanage_host,vmanage_port,api):\r\n \"\"\" function to get the url provide api endpoint \"\"\"\r\n \r\n return f\"https://{vmanage_host}:{vmanage_port}{api}\"", "def urlfor( request, *args, **kwargs ):", "def url(self) -> str:\n return self._request.url.path", "def getURLForThing(thing):", "def get_endpoint_url(endpoint):\n return urljoin(api_url_base(), endpoint)", "def geturl(self):\n return self.__url", "def url(self):\n ...", "def _endpoint(resource, method):\n\tif method=='GET' or SERVER_NAME.startswith('localhost:') or SERVER_NAME.startswith('127.0.0.1:'):\n\t\tprotocol = 'http'\n\telse:\n\t\tprotocol = 'https'\n\tif PARLIAMENT:\n\t\turl = '%s://%s/%s/%s' % (protocol, SERVER_NAME, PARLIAMENT, resource)\n\telse:\n\t\turl = '%s://%s/%s' % (protocol, SERVER_NAME, resource)\n\treturn url", "def _get_url(self, absolute):", "def _get_url(context, actual, attribute_name, port):\n return actual or _get_api_url(context, attribute_name, port)", "def _get_method_url(self):\n formatter = \"json\"\n if self.method:\n url = \"%s/%d/%s/%s.%s\" % (self.base_url, self.version,\n self.account, self.method,\n formatter)\n request_url = requests.head(url, params=None, proxies=self.proxies)\n request_url.raise_for_status()\n return url\n else:\n raise TypeError", "def _build_request_url(self, params, kwargs, post=False):\n if post:\n return '%s%s' % (self.endpoint, self.methodname)\n else:\n return '%s%s?%s' % (self.endpoint, self.methodname, kwargs)", "def url(request):\n return request.config.getoption(\"--url\")", "def url(self):\n _, body = self.request('/v1.1/url', 'GET')\n return body.get('url', None)", "def url(request):\n URL = namedtuple('URL', ['mainnet', 'testnet'])\n\n # For actual trading and market data, the mainnet URL will be used:\n # When developing application, the testnet URL should be used:\n url = URL(mainnet=\"https://api.switcheo.network/v2/\", testnet=\"https://test-api.switcheo.network/v2/\")\n\n def tear_down():\n # clean up here\n pass\n\n request.addfinalizer(tear_down)\n return url", "def getFlixConnectURL(self):\n output = \"http://\" + Mode().get(\"[flixConnectServer]\") + \":\" + Mode().get(\"[flixConnectPort]\") + \"/\"\n\n# print \"ServerFlixFunctions - 
getFlixConnectURL url:\", output\n\n return output", "def get_url():\n key = _get_key()\n return key.generate_url(300)", "def get_url(self):\n return self.url" ]
[ "0.7090844", "0.68663275", "0.6810908", "0.6800608", "0.6628658", "0.65886503", "0.6585411", "0.65614337", "0.6500577", "0.64672434", "0.64541775", "0.6430163", "0.64008325", "0.63645357", "0.6343626", "0.63189983", "0.6311656", "0.6309808", "0.6262192", "0.6255734", "0.6238082", "0.62223685", "0.62078345", "0.61940163", "0.61798143", "0.61699736", "0.61658496", "0.6160383", "0.61442155", "0.61408293" ]
0.7248379
0
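For reference, a minimal runnable sketch of the `get_function_url` document in the row above. The gateway URL and function name used below are illustrative assumptions (an OpenFaaS-style gateway is implied by the /function and /async-function paths), not values taken from the dataset.

import logging

def get_function_url(is_async, base_url, function_name):
    # Async invocations are routed through /async-function/, synchronous ones through /function/.
    if is_async is True:
        function_url = "{}/async-function/{}".format(base_url, function_name)
    else:
        function_url = "{}/function/{}".format(base_url, function_name)
    logging.debug("Get Function URL. Is Async: {}, Function URL: {}".format(is_async, function_url))
    return function_url

# Assumed example values, for illustration only:
print(get_function_url(False, "http://127.0.0.1:8080", "echo"))  # -> http://127.0.0.1:8080/function/echo
print(get_function_url(True, "http://127.0.0.1:8080", "echo"))   # -> http://127.0.0.1:8080/async-function/echo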
Retrieves all tasks with the specified ddmreqid
def taskbyddmreqid(self, **kwargs): rows = self.api.query(None, None, self.Task.TaskByDdmReqid_sql, ddmreqid=kwargs["ddmreqid"]) return rows
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tasks(self, task_id=None):\n # Recover all config from OpenVAS\n if task_id:\n return self.make_xml_request('<get_tasks id=\"%s\"/>' % name, xml_result=True)\n else:\n return self.make_xml_request(\"<get_tasks />\", xml_result=True)", "def get_tasks_of_project(self, project_id):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks WHERE project_id=? ORDER BY project_order\", (project_id,))\n return res.fetchall()", "def get_tasks(taskid_list, module):\n tasks = module.client.api.get_tasks_by_status('Pending')\n task_list = list()\n for task in tasks:\n if task['workOrderId'] in taskid_list:\n task_list.append(task)\n return task_list", "def get_tasks(id):\n url = 'https://jsonplaceholder.typicode.com/'\n tasks = requests.get(url + 'todos', params={'userId': id}).json()\n return tasks", "def db_get_task(task_id):\n sql = \"SELECT * FROM {} WHERE id=?\".format(TABLE_NAME)\n return db_query(sql, (task_id,), True)", "def get_tasks_list(project_id):\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n permission = has_project_permission(project, g.user)\n return jsonify(\n {\n \"success\": True,\n \"result\": {\n 'created_tasks': tasks_schema.dump(Task.query.filter_by(created_by_id = g.user.id).all()),\n 'tasks_you_work_on': tasks_schema.dump(g.user.tasks).all(),\n 'all': tasks_schema.dump(Task.query.filter(or_(\n Task.created_by_id==g.user.id, Task.project_id==g.user.project.id\n )).all()),\n },\n \"message\": \"Successfully fetched all tasks.\",\n }\n )", "def getProjectTasks(self, pid, archived=False):\n return self.request(Endpoints.PROJECTS + '/{0}'.format(pid) + '/tasks')", "def find_task_by_id(self, task_id):\n return self._tasks_by_id[task_id]", "def find_task_by_id(self, task_id):\n return self._tasks_by_id[task_id]", "def get_task(self, task_id):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks WHERE id=?\", (task_id,))\n return res.fetchone()", "def find(self, task_id):\n for task_obj in self._queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in dorm: '{}'\".format(task_id))", "def get_tasks(*, dag_id: str, order_by: str = \"task_id\") -> APIResponse:\n dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)\n if not dag:\n raise NotFound(\"DAG not found\")\n tasks = dag.tasks\n\n try:\n tasks = sorted(tasks, key=attrgetter(order_by.lstrip(\"-\")), reverse=(order_by[0:1] == \"-\"))\n except AttributeError as err:\n raise BadRequest(detail=str(err))\n task_collection = TaskCollection(tasks=tasks, total_entries=len(tasks))\n return task_collection_schema.dump(task_collection)", "def get_task(self, task_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"tasks\", \"task_id\", task_id)", "def list(ctx, id, json):\n\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/tasks\"}\n if id != None:\n return ctx.invoke(show, id=id, json=json)\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.list()\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n click.echo(\"Fail: error response\")\n sys.exit(1)\n\n if json:\n print(jsn.dumps(dict_resp, sort_keys=True, indent=4))\n return\n try:\n task.print_list(dict_resp)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))", "def taskdetail_get(td_id):\n return IMPL.taskdetail_get(td_id)", "def get_subtasks(self, tid):\n return 
self.task_controller.get_subtasks(tid)", "def get(self, project_id):\n try:\n pagination_args = get_pagination_args(request)\n except ArgumentError as e:\n return {'message': e.message}, 500\n\n limit = pagination_args['limit'] if 'limit' in pagination_args else self.DEFAULT_LIMIT\n offset = pagination_args['offset'] if 'offset' in pagination_args else self.DEFAULT_OFFSET\n\n tasks = backend.filter(Task, {'project.pk': request.project.pk},\n include=('project',), only=TaskDetails.export_fields, raw=True\n ).sort('created_at', -1)\n\n return {'tasks': [TaskDetails.export(task) for task in tasks[offset:offset + limit]]}, 200", "def get_task_by_id(self, task_id):\n return self._gdb_interface.get_task_by_id(task_id)", "def retrieve_task(self, task_id):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_TASK_STATUS,\n str(task_id)]))\n return r.json()", "def get(self, task_id):\n try:\n return self.dal.task.get_by_id(task_id)\n except EntityNotFound:\n raise DoesNotExist()", "def get_tasks(self):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks\")\n return res.fetchall()", "def get_tasks(self, *args, **kwargs):\n tasks_endpoint = furl(self.ENDPOINT) / self.id / \"tasks\"\n return self._client.list(Task, endpoint=tasks_endpoint.url, *args, **kwargs)", "async def list_tasks():", "def getNodeTaskByUPID(self,node,upid):\n data = self.connect('get','nodes/%s/tasks/%s' % (node,upid),None)\n return data", "def get_task_by_id(task_id):\n result = mongo.db.tasks.find({\"_id\": ObjectId(task_id)})\n return json_util.dumps(result)", "def get(self, id):\n\n return self.client.get(\"external-task/{0}\".format(id))", "def query_tasks(dbh):\n # assumes attempt task_ids are in gtt_id table\n\n curs = dbh.cursor()\n\n # query the task table\n sql = \"select t.label,t.exec_host,t.name,t.status,t.id,t.parent_task_id,t.start_time,t.root_task_id, SYSTIMESTAMP-t.start_time as length from task t, gtt_id g where t.root_task_id=g.id and (end_time is null or status!=0)\"\n curs.execute(sql)\n desc = [d[0].lower() for d in curs.description]\n\n results = {}\n for row in curs:\n dat = dict(zip(desc, row))\n if dat['root_task_id'] not in results:\n results[dat['root_task_id']] = {}\n results[dat['root_task_id']][dat['id']] = dat\n\n sql = \"select s.request_time,s.grant_time,s.release_time, t.exec_host,t.name,t.id,t.root_task_id,t.parent_task_id,t.start_time from seminfo s, task t, gtt_id g where t.root_task_id=g.id and t.name like 'trans%' and t.id=s.task_id and t.end_time is NULL\"\n curs.execute(sql)\n desc = [dd[0].lower() for dd in curs.description]\n\n results_trans = {}\n for row in curs:\n dat = dict(zip(desc, row))\n if dat['root_task_id'] not in results_trans:\n results_trans[dat['root_task_id']] = {}\n results_trans[dat['root_task_id']][dat['id']] = dat\n\n return results, results_trans", "def get_task(self, id):\n raise NotImplementedError()", "def get_task_details_by_id(self,params=['ng2157']):\n result =[]\n query_params = {'attid':params[0]}\n query = \"\"\"select a.Attuid,a.Status,a.Severity,a.TaskDetails,a.Remarks,a.StartDate,a.EndDate,a.TaskFinishDate,a.InsertDate,a.InsertedBy,a.UpdateDate,a.UpdatedBy\n from s08_DB.Alltasks a\n where a.Attuid =:attid\"\"\".replace('\\n',' ')\n with vertica_python.connect(**conn_info) as connection:\n logging.debug(\"Connected to {} on host{} \".format(conn_info['database'],conn_info['host']))\n logging.info(\"The read SQL -> {} \".format(query))\n cur = connection.cursor()\n cur.execute(query,query_params)\n for row in cur.iterate():\n result.append(row)\n 
return(result)", "def get_completed_tasks_in_tod():\n try:\n tod_file_data = load_data(os.getenv('TOD_FP'))\n except FileNotFoundError:\n return []\n completed_tasks = []\n tod_file_data = tod_file_data.split('\\n')\n\n for line in tod_file_data:\n if line == '' or line[0] != '[' or line[1] != 'X':\n continue\n completed_task = (f\"{line[4:-7]} {line[-6:]}\"\n if line[-6:] != '(0:00)'\n else line[4:-7])\n completed_tasks.append(completed_task)\n\n return completed_tasks" ]
[ "0.646642", "0.63193464", "0.61650157", "0.5977577", "0.5922998", "0.5823428", "0.5785389", "0.5712846", "0.5712846", "0.57020664", "0.5684275", "0.5683496", "0.56273025", "0.5614078", "0.55910546", "0.5587263", "0.5565647", "0.5552744", "0.55248016", "0.5497558", "0.5493142", "0.5492324", "0.5464788", "0.54514277", "0.54193103", "0.54067415", "0.53874946", "0.53818375", "0.53681755", "0.5312453" ]
0.8535213
0
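Similarly, a self-contained sketch of the call pattern in the `taskbyddmreqid` document above. The REST-handler context (the `api.query` signature, the `Task.TaskByDdmReqid_sql` statement, and the returned rows) is stubbed out here as an assumption; only the method body itself comes from the dataset row.

class _StubTask:
    # Assumed SQL text; the dataset row only references the attribute name.
    TaskByDdmReqid_sql = "SELECT tm_taskname FROM tasks WHERE tm_DDM_reqid = :ddmreqid"

class _StubApi:
    def query(self, conn, trans, sql, **binds):
        # A real data layer would execute `sql` with the bind variables;
        # this stub just returns a canned row so the example runs.
        return [("240101_000000:user_task", binds["ddmreqid"])]

class TaskHandler:
    api = _StubApi()
    Task = _StubTask()

    def taskbyddmreqid(self, **kwargs):
        rows = self.api.query(None, None, self.Task.TaskByDdmReqid_sql, ddmreqid=kwargs["ddmreqid"])
        return rows

print(TaskHandler().taskbyddmreqid(ddmreqid=42))  # [('240101_000000:user_task', 42)]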
Change last publication time for task.
def updatepublicationtime(self, **kwargs): if 'workflow' not in kwargs or not kwargs['workflow']: raise InvalidParameter("Task name not found in the input parameters") workflow = kwargs['workflow'] authz_owner_match(self.api, [workflow], self.Task) #check that I am modifying my own workflow self.api.modify(self.Task.UpdatePublicationTime_sql, workflow=[workflow]) return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_last_submission_time(self):\r\n self.last_submission_time = datetime.datetime.now(UTC())", "def republish(self):\n self.timestamp = time.time()", "def publish(self):\n self.published_date = timezone.now\n self.save()", "def publish(self):\n self.published_date = timezone.now()\n self.save()", "def publish(self):\n self.published_date = timezone.now()\n self.save()", "def set_time_to_process_last_submission(self, seconds: int):\n self.snapshot['time_to_process_last_submission'] = seconds", "def publish(self):\n\n\t\tself.patient_update_dt = timezone.now()\n\t\tself.save()", "def save(self, *args, **kwargs):\n if self.published and not self.published_on:\n self.published_on = timezone.now()\n super(NewsItem, self).save(*args, **kwargs)", "def save(self, **kwargs):\n\t\tif self.pk:\n\t\t\tself.topic_modification_date = datetime.now()\n\t\tsuper(Topic, self).save(**kwargs)", "def setSubmitTime(t):", "def update_timestamp(self):\n self._timestamp = datetime.datetime.now()", "def set_creation_time(self, t: int) -> None:\n self.metadata.data[\"creation_time\"] = t", "def published(self):\n xutimes = self.xutimes()\n\n # If there are 2 xutimes, published is the second. Otherwise, it is the\n # first and only xutime.\n\n offset = 0 if len(xutimes) == 1 else 1\n\n return dt.fromtimestamp(int(xutimes[offset]))", "def touch(self):\n self._timestamps['last_seen'] = rospy.get_rostime()", "def save(self, *args, **kwargs):\n if self.is_published and not self.published_on:\n self.published_on = timezone.now()\n else:\n try:\n # Get the old object currently in the database\n old_object = Contribution.objects.get(pk=self.pk)\n except Contribution.DoesNotExist:\n pass\n else:\n # If the object was republished, change the datetime\n if not old_object.is_published and self.is_published:\n self.published_on = timezone.now()\n \"\"\" Always add last_modified_on date \"\"\"\n self.last_modified_on = timezone.now()\n super(Contribution, self).save(*args, **kwargs)", "def setLastModified(when):", "def _set_last_time(self, cur_time):\n self._last_time = cur_time", "def _set_last_time(self, cur_time):\n self._last_time = cur_time", "def take(self):\n self.when_taken = datetime.datetime.now().timestamp()", "def getPublishedTime(self): #$NON-NLS-1$\r", "def post_generation(self, task):\n task['executiontime'] = self.etrvs.sample(task['pkg'])\n task['criticaltime'] = 4 * task['executiontime']\n if self.last_release:\n task['offset'] = task['release'] - self.last_release\n else:\n task['offset'] = task['release']\n self.last_release = task['release']", "def feltBump(self):\n self.stamp = rospy.Time.now()\n self.ready_to_publish = True", "def save(self, *args, **kwargs):\n self.modify_ts = datetime.now()\n super(ModelBase, self).save(*args, **kwargs)", "def set_last_fetch_time(last_fetch_time):\n with open(LAST_FETCH_TIME_FILE, 'w') as f:\n f.write(last_fetch_time)", "def set_remain_time(self, time):\n for task in self.tasks:\n task.remain_time = time", "def updatePullDate(self):\n self.startTime = datetime.now()", "def _set_timestamp(self):\n d = datetime.now()\n self._time_stamp = \"{:>2} {} {} {:>2}:{:>02}\".format(\n d.day, MONTH_ABBREV[d.month], d.year, d.hour, d.minute)", "def update_time(self):\n pass # Do nothing", "def change_time(self, new_time):\r\n self.when = new_time", "def lastmod_time(self, lastmod_time):\n\n self._lastmod_time = lastmod_time" ]
[ "0.6860852", "0.6508761", "0.6296663", "0.6267985", "0.6267985", "0.6241661", "0.6238545", "0.62332857", "0.6210353", "0.60572064", "0.60487616", "0.5986359", "0.5923287", "0.5916307", "0.5885974", "0.58302766", "0.57849276", "0.57849276", "0.5783901", "0.57617533", "0.57405746", "0.5690304", "0.5679177", "0.56292343", "0.5602856", "0.5579473", "0.55086553", "0.54708815", "0.543468", "0.5416315" ]
0.7061756
0
Loads sequence data from a CoNLL format data set given at `self.directory`. For the CoNLL formatted dataset given at `self.directory`, updates `self.type_seq` with lists containing the word, character and tag sequences for the train and, if provided, valid/test partitions found at `self.directory/train.`, `self.directory/valid.` and `self.directory/test.`.
def _get_type_seq(self): for partition, filepath in self.directory.items(): if filepath is not None: conll_file = os.path.basename(filepath) # get name of conll file # collect sequence data sents = list(self.conll_parser.sents(conll_file)) tagged_sents = list(self.conll_parser.tagged_sents(conll_file)) word_seq = Preprocessor.replace_rare_tokens(sents) if self.replace_rare_tokens else sents char_seq = [[[c for c in w] for w in s] for s in sents] tag_seq = [[t[-1] for t in s] for s in tagged_sents] # update the class attributes self.type_seq[partition] = {'word': word_seq, 'char': char_seq, 'tag': tag_seq}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self):\n if self.directory is None:\n err_msg = \"`Dataset.directory` is None; must be provided before call to `Dataset.load`\"\n LOGGER.error('ValueError %s', err_msg)\n raise ValueError(err_msg)\n\n # unique words, chars and tags from CoNLL formatted dataset\n types = self._get_types()\n # map each word, char, and tag type to a unique integer\n self._get_idx_maps(types)\n\n # get word, char, and tag sequences from CoNLL formatted dataset\n self._get_type_seq()\n # get final representation used for training\n self.get_idx_seq()\n\n # useful during prediction / annotation\n self.idx_to_tag = generic_utils.reverse_dict(self.type_to_idx['tag'])", "def load_sequence_labelling_dataset(step, do_lower_case,data_type,data_subtype):\n assert step in ['train', 'test']\n path = os.path.join(DATA_PATH, 'sequence_labelling', f'{step}.txt')\n i = 0\n examples = []\n with open(path, 'r', encoding='utf-8') as data_file:\n lines = data_file.readlines()\n token_sequence = []\n label_sequence = []\n for line in tqdm(lines, desc=f'reading `{os.path.basename(path)}`...'):\n # example:\n # My O\n # name O\n # is O\n # Hicham B-PER\n # . O\n splitline = line.strip().split()\n if splitline:\n token, label = splitline\n token_sequence.append(token)\n label_sequence.append(label)\n else:\n examples.append(\n SequenceLabellingExample(\n id=i,\n token_sequence=token_sequence,\n label_sequence=label_sequence,\n )\n )\n i += 1\n token_sequence = []\n label_sequence = []\n\n # Don't forget to add the last example\n if token_sequence:\n examples.append(\n SequenceLabellingExample(\n id=i,\n token_sequence=token_sequence,\n label_sequence=label_sequence,\n )\n )\n\n retokenize(\n examples,\n tokenization_function=BasicTokenizer(do_lower_case=do_lower_case).tokenize)\n logging.info('Number of `%s` examples: %d', step, len(examples))\n return examples", "def load_file(self, dset_type):\r\n path = './data/{0}.{1}'.format(self.name, dset_type)\r\n try:\r\n file_contents = np.genfromtxt(path, missing_values=0, skip_header=0,\r\n dtype=int, delimiter=\",\")\r\n self.labels[dset_type] = file_contents[:, 0]\r\n self.examples[dset_type] = file_contents[:, 1:]\r\n\r\n except RuntimeError:\r\n print('ERROR: Unable to load file ''{0}''. 
Check path and try again.'.format(path))", "def loadData(self, dataType): \n if dataType == \"train\":\n f = self.urls[0]\n elif dataType == \"valid\":\n f = self.urls[1]\n elif dataType == \"test\":\n f = self.urls[2] \n\n \"\"\" Load text file \"\"\"\n corpus = list()\n with io.open(f, encoding='UTF-8') as f:\n for line in f: \n if len(line) > self.minSeq and len(line) < self.maxLen:\n corpus.append(line.lstrip().rstrip().split(' '))\n return corpus", "def sequence_loader(\n data_path: str,\n index_path: typing.Union[str, None],\n context_description: typing.Union[\n typing.List[str], typing.Dict[str, str], None\n ] = None,\n features_description: typing.Union[\n typing.List[str], typing.Dict[str, str], None\n ] = None,\n shard: typing.Optional[typing.Tuple[int, int]] = None,\n compression_type: typing.Optional[str] = None,\n) -> typing.Iterable[\n typing.Tuple[\n typing.Dict[str, np.ndarray], typing.Dict[str, typing.List[np.ndarray]]\n ]\n]:\n typename_mapping = {\n \"byte\": \"bytes_list\",\n \"float\": \"float_list\",\n \"int\": \"int64_list\"\n }\n\n record_iterator = tfrecord_iterator(\n data_path=data_path,\n index_path=index_path,\n shard=shard,\n compression_type=compression_type,\n )\n\n for record in record_iterator:\n example = example_pb2.SequenceExample()\n example.ParseFromString(record)\n\n context = extract_feature_dict(example.context, context_description, typename_mapping)\n features = extract_feature_dict(example.feature_lists, features_description, typename_mapping)\n\n yield context, features", "def load_classification_dataset(step, do_lower_case,data_type,data_subtype,use_syntetic_data):\n assert step in ['train', 'test']\n binary = False \n undersample_majority = False\n\n paths = ['~/Github/Data/Patient/NIRADS/PET_CT_NIRADS.xlsx', '~/Github/Data/Patient/NIRADS/MR_NIRADS_2018.xlsx','~/Github/Data/Patient/NIRADS/MR_NIRADS.xlsx']\n if data_type == 'ct':\n data_r = pd.read_excel(paths[0])\n else:\n data_r = pd.read_excel(paths[1])\n data_r.append(pd.read_excel(paths[2]), ignore_index = True, sort=False)\n\n data_p,data_n, y_p, y_n = tc.text_cleaning(data_r, None, data_target='section') \n\n if data_subtype == 'primary':\n data = data_p\n y = y_p -1\n else:\n data = data_n\n y = y_n -1\n\n if binary:\n y[y<2]=0\n y[y>0]=1\n\n y_dist = [np.sum(y==x) for x in np.unique(y)]\n print(\"Distribution of all labels: \", y_dist, \"\\n\\n\")\n\n train_text, test_text, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=1)\n\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels: \", y_dist, \"\\n\\n\")\n\n if step =='train':\n if use_syntetic_data:\n data_syntetic = pd.read_csv('~/Github/Data/Patient/NIRADS/PET_CT_NIRADS_syntetic.csv')\n train_text = np.concatenate((train_text,data_syntetic['syntetic_data'].values))\n y_train = np.concatenate((y_train,data_syntetic['syntetic_label'].values-1))\n\n train_text, test_text, y_train, y_test = train_test_split(train_text, y_train, test_size=0.5, random_state=1)\n train_text = np.concatenate((train_text,test_text))\n y_train = np.concatenate((y_train,y_test))\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels after inserting syntetic data: \", y_dist, \"\\n\\n\")\n\n if not undersample_majority:\n data_to_use = train_text.copy()\n y_to_use = y_train.copy()\n else:\n max_label1 = 1000\n data_to_use = []\n y_to_use = []\n y1=0\n for x in range(len(y_train)):\n if y_train[x] !=1:\n data_to_use.append(train_text[x])\n 
y_to_use.append(y_train[x])\n else:\n if y1 <max_label1:\n data_to_use.append(train_text[x])\n y_to_use.append(y_train[x])\n y1+=1\n\n else:\n data_to_use = test_text.copy()\n y_to_use = y_test.copy()\n\n basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n examples = []\n\n for i, tokens in tqdm(enumerate(data_to_use)):\n label = y_to_use[i]\n examples.append(\n ClassificationExample(\n id=i,\n tokens_a=basic_tokenizer.tokenize(tokens),\n tokens_b=None,\n label=label,\n )\n )\n logging.info('Number of `%s` examples: %d', step, len(examples))\n \n return examples", "def load_for_sklearn(self):\n\n labels = [] # string labels\n examples = [] # examples as strings\n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_as_string = open(file_path).read()\n\n string_label = doc2label[doc_id]\n int_label = LABEL2INT[string_label]\n labels.append(int_label)\n examples.append(file_as_string)\n\n return examples, labels", "def get_train_data(sequence_length=100):\n\n network_input = list()\n network_output = list()\n notes = read_binary_file(str(data_dir / \"notes.pkl\"))\n\n # get all pitch names\n pitch_names = sorted(set(item for item in notes))\n # Embedding #TODO use keras Embedding layer instead\n note_to_int = read_binary_file(metadata_dir / \"note_to_int.pkl\")\n vocab_size = len(set(note_to_int))\n\n # create input sequences and the corresponding outputs\n for i in range(0, len(notes) - sequence_length, 1):\n sequence_in = notes[i : i + sequence_length]\n sequence_out = notes[i + sequence_length]\n network_input.append([note_to_int[char] for char in sequence_in])\n network_output.append(note_to_int[sequence_out])\n\n n_patterns = len(network_input)\n # reshape the input into a format compatible with LSTM layers\n network_input = np.reshape(network_input, (n_patterns, sequence_length, 1))\n # normalize input\n network_input = network_input / float(vocab_size)\n network_output = np_utils.to_categorical(network_output)\n\n with open(metadata_dir / \"sequence_in.pkl\", \"wb\") as f:\n pickle.dump(network_input, f)\n with open(metadata_dir / \"sequence_out.pkl\", \"wb\") as f:\n pickle.dump(network_output, f)\n return network_input, network_output, vocab_size", "def load_dataset(sequence_length=10):\n train_x = []\n train_y = []\n notes_to_emotion = []\n song_index_to_notes = get_notes()\n song_index_to_emotion = get_emotions()\n\n for index, notes in song_index_to_notes.items():\n if index in song_index_to_emotion:\n notes_to_emotion.append((notes, song_index_to_emotion[index]))\n\n for notes, emotion in notes_to_emotion:\n # get all pitch names\n pitchnames = sorted(set(item for item in notes))\n\n # create a dictionary to map pitches to integers\n note_to_int = dict((note, number) for number, note in enumerate(pitchnames))\n for i in range(0, int(len(notes)) - sequence_length):\n music_in = notes[i: i + sequence_length]\n train_x.append([note_to_int[char] for char in music_in])\n train_y.append(emotion)\n\n print(\"train_x has shape: \", len(train_x))\n print(\"train_y has shape: \", len(train_y))\n\n return (np.asarray(train_x), np.asarray(train_y))", "def load_data():\r\n train = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'train.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n val = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'valid.txt', ['words', 'pos', 'ignore', 'chunk'])) # 
testa will be our val set\r\n test = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'test.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n\r\n return train, val, test", "def trainData(self,):\n count = 0\n while count < len(self.RAD_sequences_train):\n RAD_filename = self.RAD_sequences_train[count] \n RAD_complex = loader.readRAD(RAD_filename)\n if RAD_complex is None:\n raise ValueError(\"RAD file not found, please double check the path\")\n ### NOTE: Gloabl Normalization ###\n RAD_data = helper.complexTo2Channels(RAD_complex)\n RAD_data = (RAD_data - self.config_data[\"global_mean_log\"]) / \\\n self.config_data[\"global_variance_log\"]\n ### load ground truth instances ###\n gt_filename = loader.gtfileFromRADfile(RAD_filename, \\\n self.config_data[\"train_set_dir\"])\n gt_instances = loader.readRadarInstances(gt_filename)\n if gt_instances is None:\n raise ValueError(\"gt file not found, please double check the path\")\n\n ### NOTE: decode ground truth boxes to YOLO format ###\n gt_labels, has_label, raw_boxes = self.encodeToLabels(gt_instances)\n\n if has_label:\n yield (RAD_data, gt_labels, raw_boxes)\n count += 1\n if count == len(self.RAD_sequences_train) - 1:\n # np.random.seed() # should I add seed here ?\n np.random.shuffle(self.RAD_sequences_train)", "def load_coco_ann_files(self):\n if self.type == 'train':\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'train2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_train2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'train2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_train2017.json'))),\n # (os.path.join(self.dataset_root, 'mpii', 'images'),\n # COCO(os.path.join(self.dataset_root, 'mpii',\n # 'annotations', 'train.json')))\n ]\n else:\n datasets = [\n (os.path.join(self.dataset_root, 'coco', 'val2014'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2014', 'person_keypoints_val2014.json'))),\n (os.path.join(self.dataset_root, 'coco', 'val2017'),\n COCO(os.path.join(self.dataset_root, 'coco',\n 'annotations_trainval2017', 'person_keypoints_val2017.json')))\n ]\n\n dict_list = []\n for dataset_path, dataset in datasets:\n img_ids = dataset.getImgIds()\n\n for idx in img_ids:\n try:\n img = dataset.loadImgs([idx])[0]\n ann_ids = dataset.getAnnIds([idx])\n anns = dataset.loadAnns(ann_ids)\n\n if [ann['keypoints'] for ann in anns] and not all([ann['keypoints'] == [0]*51 for ann in anns]):\n keypoints = [ann['keypoints'] for ann in anns if ann['keypoints'] != [0]*51]\n for i in range(len(keypoints)):\n if 'coco' in dataset_path:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][17] and keypoints[i][20])\n else [(keypoints[i][15] + keypoints[i][18]) // 2, (keypoints[i][16] + keypoints[i][19]) // 2, 1])\n else:\n keypoints[i] = keypoints[i] + ([0, 0, 0] if not (keypoints[i][41] and keypoints[i][38])\n else [(keypoints[i][39] + keypoints[i][36]) // 2, (keypoints[i][40] + keypoints[i][37]) // 2, 1])\n\n if len([kp for kp in keypoints if kp != [0]*54]) <= 4:\n dict_list.append({'path': os.path.join(dataset_path, img[\"file_name\"]),\n 'keypoints': [kp for kp in keypoints if kp != [0]*54]})\n except:\n print(f'Skipped: {idx}')\n\n final_dataset = pd.DataFrame.from_dict(dict_list)\n\n return final_dataset", "def initial_sequence_loading(self, work_dir: str):\n # preprocess FASTA with sequences\n # rename IUPAC to N symbols using sed\n fasta_raw = 
self.from_param(\"manifest_data\", \"fasta_dna\")\n fasta_clean = self.pjc(work_dir, \"fasta\", \"seq_no_iupac.fasta\")\n self.remove_IUPAC(fasta_raw, fasta_clean)\n\n # start coord system ranking and agps processing\n agps = self.from_param(\"manifest_data\", \"agp\", not_throw = True)\n\n # rank cs_names, met in agps.keys (\"-\" separated, i.e. \"scaffold-contig\") based on cs_order\n # use noagp_cs_name_default for \"noagp\" assemblies\n cs_order = self.coord_sys_order(self.param(\"cs_order\"))\n noagps_cs = self.param(\"noagp_cs_name_default\")\n cs_rank = self.used_cs_ranks(agps, cs_order, noagps_cs)\n\n # remove gaps and lower_level mappings if the are coveres by higher level ones\n # i.e.: remove 'contigN to chromosomeZ', if 'contigN to scaffoldM' and 'scaffoldM to chromosomeZ' are in place\n # returns None if no agps provided\n agps_pruned_dir = self.pjc(work_dir, \"agps_pruned\")\n agps_pruned = self.prune_agps(agps, cs_order, agps_pruned_dir, self.param_bool(\"prune_agp\"))\n\n # empty agps_pruned ignored\n self.load_seq_data(fasta_clean, agps_pruned, cs_rank, self.pjc(work_dir, \"load\"))\n\n # mark all the \"contig\"s or noagp_cs as being sourced from ENA\n if not self.param_bool(\"no_contig_ena_attrib\"):\n if agps is None:\n self.add_contig_ena_attrib(self.pjc(work_dir, \"load\", \"set_ena\"), cs_name = noagps_cs)\n else:\n self.add_contig_ena_attrib(self.pjc(work_dir, \"load\", \"set_ena\"))\n\n # unversion scaffold, remove \".\\d$\" from names if there's a need\n if self.param_bool(\"unversion_scaffolds\"):\n self.unversion_scaffolds(cs_rank, self.pjc(work_dir, \"unversion_scaffolds\"))\n\n # add assembly mappings between various cs to meta table for the mapper to work properly\n cs_pairs = agps_pruned and agps_pruned.keys() or None\n self.add_asm_mappings(cs_pairs, self.pjc(work_dir, \"asm_mappings\"))\n\n # set toplevel seq_region attribute\n self.set_toplevel(self.pjc(work_dir, \"set_toplevel\"), self.param(\"not_toplevel_cs\"))\n\n # nullify contig version and update mappings strings accordingly; ignore for \"load_additional_sequences\" mode\n if not self.param_bool(\"load_additional_sequences\"):\n self.nullify_ctg_cs_version(self.pjc(work_dir, \"asm_mapping\", \"nullify_cs_versions\"))", "def load_dataset(self, split, epoch=1, combine=False, **kwargs):\n paths = self.args.data.split(':')\n assert len(paths) > 0\n data_path = paths[(epoch - 1) % len(paths)]\n\n # infer langcode\n \n lg_datasets = []\n for lg in self.gt_langs:\n src, tgt = lg, lg \n bos_id = self.tgt_dict.index('[{}]'.format(lg))\n data_path_lg = os.path.join(data_path, lg)\n dataset = load_generation_pair_dataset(\n data_path_lg, split, tgt, self.src_dict, self.tgt_dict,\n combine=combine, dataset_impl=self.args.dataset_impl,\n upsample_primary=self.args.upsample_primary,\n left_pad_source=self.args.left_pad_source,\n left_pad_target=self.args.left_pad_target,\n max_source_positions=getattr(self.args, 'max_source_positions', 1024),\n max_target_positions=getattr(self.args, 'max_target_positions', 1024),\n load_alignments=self.args.load_alignments,\n prepend_bos=getattr(self.args, 'preprend_bos', False),\n append_source_id=True,\n common_eos=self.args.common_eos,\n lg_id=bos_id\n )\n lg_datasets.append(dataset)\n \n dataset_lengths = np.array([len(d) for d in lg_datasets], dtype=float) \n\n sample_probs = self._get_sample_prob(dataset_lengths)\n logger.info(\"| Sample probability by language: \", {\n lang: \"{0:.4f}\".format(sample_probs[id])\n for id, lang in enumerate(self.gt_langs)\n }\n )\n 
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths\n logger.info(\"| Up/Down Sampling ratio by language: \", {\n lang: \"{0:.2f}\".format(size_ratio[id])\n for id, lang in enumerate(self.gt_langs)\n }\n )\n if split == getattr(self.args, \"train_subset\", \"train\"):\n resampled_lang_datasets = [\n ResamplingDataset(\n lg_datasets[i],\n size_ratio=size_ratio[i],\n seed=self.args.seed,\n epoch=epoch,\n replace=size_ratio[i] >= 1.0,\n )\n for i, d in enumerate(lg_datasets)\n ]\n dataset = ConcatDataset(\n resampled_lang_datasets,\n )\n else:\n dataset = ConcatDataset(lg_datasets)\n lang_splits = [split]\n for lang_id, lang_dataset in enumerate(lg_datasets):\n split_name = split + '_' + self.gt_langs[lang_id]\n lang_splits.append(split_name)\n self.datasets[split_name] = lang_dataset\n \n if hasattr(self.args, \"valid_subset\"):\n if split in self.args.valid_subset:\n self.args.valid_subset = self.args.valid_subset.replace(\n split, ','.join(lang_splits)\n )\n\n with data_utils.numpy_seed(self.args.seed + epoch):\n shuffle = np.random.permutation(len(dataset))\n self.datasets[split] = SortDataset(\n dataset,\n sort_order=[\n shuffle,\n dataset.sizes,\n ],\n )", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. 
See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def read_from_file(self, file_type):\n\n doc_idx, sentence_list_idx, tag_idx = [], [], []\n\n file_name = self.config.parser[file_type + '_dir']\n with open(file_name, 'r') as f:\n\n new_batch_doc_idx, new_batch_sentence_list_idx,\\\n new_batch_tag_idx = [], [], []\n new_sentence_idx = [] # for a sentence\n # temprate variable to store current batch data\n\n for idx, line in enumerate(f.readlines()):\n if idx == 95:\n xu = 1\n contents = line.strip().split(' ')\n # an empty line, means seperator for two batch\n # doc id, means a new batch whose `docid` is doc id\n # a word and its tag sepaerated by a blank\n if len(contents) >= 2:\n word, role = contents[0], contents[1]\n new_batch_doc_idx.append(\n self.word_dict.get_word_index(word)\n )\n new_batch_tag_idx.append(\n self.tag_dict.get_word_index(role)\n )\n new_sentence_idx.append(\n self.word_dict.get_word_index(word)\n )\n if word is '.':\n # default: '.' is the seperator for two sentences.\n new_batch_sentence_list_idx.append(new_sentence_idx)\n new_sentence_idx = []\n elif len(contents) == 1 and contents[0] != '':\n\n new_batch_doc_idx, new_batch_sentence_list_idx,\\\n new_batch_tag_idx = [], [], []\n new_sentence_idx = [] # for a sentence\n # temprate variable to store current batch data\n\n elif len(contents) == 1 and contents[0] == ''\\\n and len(new_batch_doc_idx) < self.config.parser['HP_max_len']:\n\n # Sometimes a sentence is not terminated by `.`\n # It will cause bug without this judgement.\n if len(new_sentence_idx) > 0:\n new_batch_sentence_list_idx.append(new_sentence_idx)\n new_sentence_idx = []\n doc_idx.append(new_batch_doc_idx)\n sentence_list_idx.append(new_batch_sentence_list_idx)\n tag_idx.append(new_batch_tag_idx)\n\n return doc_idx, sentence_list_idx, tag_idx", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def load_data(del_file, dup_file, non_file,\n seq_length=500, \n channels_first=True,\n normalize_data=False):\n deletions = pd.read_pickle(del_file)\n duplications = pd.read_pickle(dup_file)\n non_sv = pd.read_pickle(non_file)\n\n # combine data and create labels\n data = np.concatenate((\n non_sv.data.values,\n deletions.data.values,\n duplications.data.values\n ))\n\n labels = np.concatenate((\n 
np.zeros((len(non_sv),)),\n np.full((len(deletions,)), fill_value=1),\n np.full((len(duplications,)), fill_value=2)\n ))\n labels = tf.keras.utils.to_categorical(labels)\n\n # make fixed length sequences\n data_padded = [\n tf.keras.preprocessing.sequence.pad_sequences(\n d, maxlen=seq_length, \n padding='post',\n truncating='post',\n dtype='float32')\n for d in data]\n data_padded = np.array(data_padded)\n\n if normalize_data:\n # data_padded = tf.keras.utils.normalize(data_padded)\n data_padded = data_padded/np.max(data_padded)\n\n if not channels_first:\n # RNN input shape needs to be (batch_size, seq_length, input_features)\n data_padded = np.swapaxes(data_padded, 1, 2)\n\n return data_padded, labels", "def load_data(self):\n params = self.params\n catg = params.data_category\n langs = ['en', params.target_lang]\n data = {lang: {splt: {} for splt in (['train', 'valid'] if lang == 'en' else ['test'])} for lang in langs}\n clf_dataset_path = {\n lang: {\n splt: {\n 'x': os.path.join(params.data_path, '%s_%s_%s_x.bpe.pth' % (splt, lang, catg)),\n 'y': os.path.join(params.data_path, '%s_%s_%s_y.txt' % (splt, lang, catg)),\n } for splt in (['train', 'valid'] if lang == 'en' else ['test'])\n } for lang in langs\n }\n for splt in ['train', 'valid', 'test']:\n for lang in langs:\n if lang == 'en' and splt in ['train', 'valid'] or lang != 'en' and splt == 'test':\n # load data and dictionary\n data1 = load_binarized(clf_dataset_path[lang][splt]['x'], params)\n data['dico'] = data.get('dico', data1['dico'])\n # set dictionary parameters\n set_dico_parameters(params, data, data1['dico'])\n # create dataset\n data[lang][splt]['x'] = Dataset(data1['sentences'], data1['positions'], params)\n # load labels\n with open(clf_dataset_path[lang][splt]['y'], 'r') as f:\n labels = [int(l) for l in f]\n data[lang][splt]['y'] = torch.LongTensor(labels)\n assert len(data[lang][splt]['x']) == len(data[lang][splt]['y'])\n\n return data", "def _load_raw_file(self, tracker, seq, is_gt):\n\n # Only loaded when run to reduce minimum requirements\n from pycocotools import mask as mask_utils\n\n # File location\n if self.data_is_zipped:\n if is_gt:\n zip_file = os.path.join(self.gt_fol, 'data.zip')\n else:\n zip_file = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol + '.zip')\n file = seq + '.txt'\n else:\n zip_file = None\n if is_gt:\n file = self.config[\"GT_LOC_FORMAT\"].format(gt_folder=self.gt_fol, seq=seq)\n else:\n file = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol, seq + '.txt')\n\n # Ignore regions\n if is_gt:\n crowd_ignore_filter = {2: ['10']}\n else:\n crowd_ignore_filter = None\n\n # Load raw data from text file\n read_data, ignore_data = self._load_simple_text_file(file, crowd_ignore_filter=crowd_ignore_filter,\n is_zipped=self.data_is_zipped, zip_file=zip_file,\n force_delimiters=' ')\n\n # Convert data to required format\n num_timesteps = self.seq_lengths[seq]\n data_keys = ['ids', 'classes', 'dets']\n if is_gt:\n data_keys += ['gt_ignore_region']\n raw_data = {key: [None] * num_timesteps for key in data_keys}\n\n # Check for any extra time keys\n current_time_keys = [str(t) for t in range(num_timesteps)]\n extra_time_keys = [x for x in read_data.keys() if x not in current_time_keys]\n if len(extra_time_keys) > 0:\n if is_gt:\n text = 'Ground-truth'\n else:\n text = 'Tracking'\n raise TrackEvalException(\n text + ' data contains the following invalid timesteps in seq %s: ' % seq + ', '.join(\n [str(x) + ', ' for x in extra_time_keys]))\n\n for t in range(num_timesteps):\n 
time_key = str(t)\n # list to collect all masks of a timestep to check for overlapping areas\n all_masks = []\n if time_key in read_data.keys():\n try:\n raw_data['dets'][t] = [{'size': [int(region[3]), int(region[4])],\n 'counts': region[5].encode(encoding='UTF-8')}\n for region in read_data[time_key]]\n raw_data['ids'][t] = np.atleast_1d([region[1] for region in read_data[time_key]]).astype(int)\n raw_data['classes'][t] = np.atleast_1d([region[2] for region in read_data[time_key]]).astype(int)\n all_masks += raw_data['dets'][t]\n except IndexError:\n self._raise_index_error(is_gt, tracker, seq)\n except ValueError:\n self._raise_value_error(is_gt, tracker, seq)\n else:\n raw_data['dets'][t] = []\n raw_data['ids'][t] = np.empty(0).astype(int)\n raw_data['classes'][t] = np.empty(0).astype(int)\n if is_gt:\n if time_key in ignore_data.keys():\n try:\n time_ignore = [{'size': [int(region[3]), int(region[4])],\n 'counts': region[5].encode(encoding='UTF-8')}\n for region in ignore_data[time_key]]\n raw_data['gt_ignore_region'][t] = mask_utils.merge([mask for mask in time_ignore],\n intersect=False)\n all_masks += [raw_data['gt_ignore_region'][t]]\n except IndexError:\n self._raise_index_error(is_gt, tracker, seq)\n except ValueError:\n self._raise_value_error(is_gt, tracker, seq)\n else:\n raw_data['gt_ignore_region'][t] = mask_utils.merge([], intersect=False)\n\n # check for overlapping masks\n if all_masks:\n masks_merged = all_masks[0]\n for mask in all_masks[1:]:\n if mask_utils.area(mask_utils.merge([masks_merged, mask], intersect=True)) != 0.0:\n raise TrackEvalException(\n 'Tracker has overlapping masks. Tracker: ' + tracker + ' Seq: ' + seq + ' Timestep: ' + str(\n t))\n masks_merged = mask_utils.merge([masks_merged, mask], intersect=False)\n\n if is_gt:\n key_map = {'ids': 'gt_ids',\n 'classes': 'gt_classes',\n 'dets': 'gt_dets'}\n else:\n key_map = {'ids': 'tracker_ids',\n 'classes': 'tracker_classes',\n 'dets': 'tracker_dets'}\n for k, v in key_map.items():\n raw_data[v] = raw_data.pop(k)\n raw_data[\"num_timesteps\"] = num_timesteps\n raw_data['seq'] = seq\n return raw_data", "def load_dataset(self, split, epoch=0, combine=False, **kwargs):\n paths = self.args.data.split(':')\n assert len(paths) > 0\n data_path = paths[epoch % len(paths)]\n\n # infer langcode\n src, tgt = self.args.source_lang, self.args.target_lang\n if not hasattr(self.args, \"source_tau\"): self.args.source_tau = -1\n if not hasattr(self.args, \"target_tau\"): self.args.target_tau = -1\n\n if not hasattr(self.args, 'source_tau'): self.args.source_tau = -1\n if not hasattr(self.args, 'target_tau'): self.args.target_tau = -1\n\n if self.args.main_src_wordfreq is not None and self.args.dialect_src_wordfreq is not None:\n def word_idx_from_file(filename):\n idx = []\n with open(filename, 'r') as myfile:\n for line in myfile:\n idx.append(self.src_dict.index(line.split()[0]))\n return idx\n self.main_src_word_idx = word_idx_from_file(self.args.main_src_wordfreq) \n self.dialect_src_word_idx = word_idx_from_file(self.args.dialect_src_wordfreq)\n idx_to_sample_prob = []\n for i, src_word in enumerate(self.main_src_word_idx):\n if self.args.dialect_tau == -1:\n dialect_word_probs = np.array([1. 
for k in range(len(self.dialect_src_word_idx))])\n else:\n dialect_word_probs = np.array([-np.absolute(k-i) for k in range(len(self.dialect_src_word_idx))])\n idx_to_sample_prob.append(dialect_word_probs)\n #self.idx_to_sample_prob = scipy.special.softmax(np.array(self.idx_to_sample_prob)*0.01, axis=1)\n idx_to_sample_prob = scipy.special.softmax(np.array(idx_to_sample_prob)*self.args.dialect_tau, axis=1)\n print(idx_to_sample_prob)\n self.idx_to_sample_prob = {}\n for i, src_word in enumerate(self.main_src_word_idx):\n self.idx_to_sample_prob[src_word] = idx_to_sample_prob[i]\n pass_item = (self.idx_to_sample_prob, self.dialect_src_word_idx)\n else:\n pass_item = None\n if split != 'train':\n src_tau = -1 \n tgt_tau = -1\n mlm = None\n idx_to_src_gradnorm = None\n else: \n src_tau = self.args.source_tau \n tgt_tau = self.args.target_tau \n mlm = self.mlm\n idx_to_src_gradnorm = self.idx_to_src_gradnorm\n\n self.datasets[split] = load_langpair_dataset(\n data_path, split, src, self.src_dict, tgt, self.tgt_dict,\n combine=combine, dataset_impl=self.args.dataset_impl,\n upsample_primary=self.args.upsample_primary,\n left_pad_source=self.args.left_pad_source,\n left_pad_target=self.args.left_pad_target,\n max_source_positions=self.args.max_source_positions,\n max_target_positions=self.args.max_target_positions,\n src_tag=self.args.src_tag, tgt_tag=self.args.tgt_tag,\n src_tau=src_tau, tgt_tau=tgt_tau,\n epoch=epoch,\n id_to_sample_probabilities=pass_item,\n lm=mlm,\n idx_to_src_gradnorm=idx_to_src_gradnorm,\n )", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def process_training_data(self, fold_directory):\n # Step 1: Remove the category pages from dataset. 
If user sequence contains only category pages, do not include it to training\n self.user_sequences_dataframe = joblib.load(fold_directory)\n self.user_sequences_dataframe = self.remove_catalogs(dataframe=self.user_sequences_dataframe)\n\n # Assign the processed item sequences\n self.user_sequence_list = self.user_sequences_dataframe['item_sequence'].values\n\n # Step 2: Create Features and Targets\n self.create_features_and_targets()\n\n # Step 3: Apply Mask to All Features\n self.apply_mask(all_features=self.all_features, number_of_features=1)\n\n # Step 4: Encode Targets\n self.encode_targets()", "def _load_data(self, cfg):\r\n\r\n if self._split == \"train\":\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.TRAIN_LISTS)\r\n elif self._split == \"val\":\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.VAL_LISTS)\r\n else:\r\n self._annotations = self._load_lists(cfg.EGO4D_STA.TEST_LISTS)", "def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)", "def load_nuclei(self, dataset_dir, subset):\n # Add classes. We have one class.\n # Naming the dataset nucleus, and the class nucleus\n self.add_class(self.dataset_name, 1, 'nucleus')\n\n # Which subset?\n # \"val\": use hard-coded list above\n # \"train\": use data from stage1_train minus the hard-coded list above\n # else: use the data from the specified sub-directory\n assert subset in [\"train\", \"val\", \"stage1_train\",\n \"stage1_test\", \"stage2_test\"]\n subset_dir = \"stage1_train\" if subset in [\"train\", \"val\"] else subset\n dataset_dir = os.path.join(dataset_dir, subset_dir)\n if subset == \"val\":\n image_ids = NucleusDatasetHandler.VAL_IMAGE_IDS\n else:\n # Get image ids from directory names\n image_ids = next(os.walk(dataset_dir))[1]\n if subset == \"train\":\n image_ids = list(set(image_ids) -\n set(NucleusDatasetHandler.VAL_IMAGE_IDS))\n\n # Add images\n for image_id in image_ids:\n image_name = f\"images/{image_id}.png\"\n self.add_image(\n self.dataset_name,\n image_id=image_id,\n path=os.path.join(dataset_dir, image_id, image_name)\n )", "def prepare(self):\n bcolz.set_nthreads(2)\n\n # step 0: load only when not loaded yet\n if TRAINING in self.data and VALIDATION in self.data: return\n\n # step 1: load the file names\n patients = sorted(glob.glob(self.location+'/*.*/'))\n print len(patients), \"patients\"\n\n # step 1: load the file names\n # make a stratified validation set\n # note, the seed decides the validation set, but it is deterministic in the names\n random.seed(317070)\n patient_names = [self.patient_name_from_file_name(f) for f in patients]\n validation_patients = random.sample(patient_names, int(VALIDATION_SET_SIZE*len(patient_names)))\n\n labels_as_dict = defaultdict(list)\n\n with open(paths.LUNA_LABELS_PATH, 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(reader) # skip the header\n for row in reader:\n label = (float(row[1]), float(row[2]), float(row[3]), float(row[4]))\n labels_as_dict[str(row[0])].append(label)\n\n # make the static data empty\n for s in self.datasets:\n self.data[s] = []\n self.labels[s] = []\n self.names[s] = []\n self.spacings[s] = []\n self.origins[s] = []\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'spacings.pkl.gz') as f:\n spacings = cPickle.load(f)\n\n with gzip.open(paths.INTERMEDIATE_DATA_PATH + 'origins.pkl.gz') as f:\n origins = cPickle.load(f)\n\n # load the filenames and put into the right dataset\n for i, patient_folder in 
enumerate(patients):\n patient_id = str(patient_folder.split(path.sep)[-2])\n if patient_id in validation_patients:\n dataset = VALIDATION\n else:\n dataset = TRAIN\n\n\n label = labels_as_dict[patient_id]\n if self.only_positive and not label:\n continue\n\n self.data[dataset].append(patient_folder)\n self.labels[dataset].append(label)\n self.names[dataset].append(patient_id)\n self.spacings[dataset].append(spacings[patient_id])\n self.origins[dataset].append(origins[patient_id])\n\n # give every patient a unique number\n last_index = -1\n for set in self.datasets:\n self.indices[set] = range(last_index+1,last_index+1+len(self.data[set]))\n if len(self.indices[set]) > 0:\n last_index = self.indices[set][-1]\n print set, len(self.indices[set]), \"samples\"", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' % (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def load_data_list(self) -> List[dict]: # noqa: E501\n try:\n import lvis\n if getattr(lvis, '__version__', '0') >= '10.5.3':\n warnings.warn(\n 'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"', # noqa: E501\n UserWarning)\n from lvis import LVIS\n except ImportError:\n raise ImportError(\n 'Package lvis is not installed. Please run \"pip install git+https://github.com/lvis-dataset/lvis-api.git\".' # noqa: E501\n )\n self.lvis = LVIS(self.ann_file)\n self.cat_ids = self.lvis.get_cat_ids()\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.cat_img_map = copy.deepcopy(self.lvis.cat_img_map)\n\n img_ids = self.lvis.get_img_ids()\n data_list = []\n total_ann_ids = []\n for img_id in img_ids:\n raw_img_info = self.lvis.load_imgs([img_id])[0]\n raw_img_info['img_id'] = img_id\n # coco_url is used in LVISv1 instead of file_name\n # e.g. 
http://images.cocodataset.org/train2017/000000391895.jpg\n # train/val split in specified in url\n raw_img_info['file_name'] = raw_img_info['coco_url'].replace(\n 'http://images.cocodataset.org/', '')\n ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])\n raw_ann_info = self.lvis.load_anns(ann_ids)\n total_ann_ids.extend(ann_ids)\n parsed_data_info = self.parse_data_info({\n 'raw_ann_info':\n raw_ann_info,\n 'raw_img_info':\n raw_img_info\n })\n data_list.append(parsed_data_info)\n if self.ANN_ID_UNIQUE:\n assert len(set(total_ann_ids)) == len(\n total_ann_ids\n ), f\"Annotation ids in '{self.ann_file}' are not unique!\"\n\n del self.lvis\n\n return data_list", "def test_conll_doc_dataloader(shared_datadir, dummy_vocabs):\n token_vocab, char_vocab, label_vocab = dummy_vocabs\n dataset = ConllDataset(\n os.path.join(shared_datadir, \"conll_doc_sample.txt\"),\n token_vocab, char_vocab, label_vocab, doc_mode=True,\n graph_builder=SeqTagCRFBuilder(skip_chain_enabled=True))\n\n dataset = DataLoader(dataset, batch_size=3,\n collate_fn=generate_document_batch)\n \n # Check the one batch\n for batch in dataset:\n tokens, token_chars, sentence_lengths, \\\n factor_dependencies, strings, labels = batch\n \n assert sentence_lengths.tolist() == [1, 5, 3, 4, 1, 3, 1, 5, 2]\n assert tokens.shape == (9, 5)\n assert token_chars.shape == (45, 10)\n assert labels.shape == (9, 5)\n \n # Check values\n assert (tokens[0:3] == torch.tensor([\n [0, 1, 1, 1, 1], [5, 6, 7, 2, 0], [6, 4, 3, 1, 1]\n ])).all()\n assert (labels[5:] == torch.tensor([\n [3, 6, 6, 1, 1], [6, 1, 1, 1, 1], [2, 4, 4, 6, 6], [3, 5, 1, 1, 1]\n ])).all()\n \n # Check the factor dependencies\n assert len(factor_dependencies) == 3\n assert factor_dependencies[\"unary\"] == [\n (0,), (5,), (6,), (7,), (8,), (9,), (10,), (11,), (12,),\n (15,), (16,), (17,), (18,), (20,), (25,), (26,), (27,), (30,),\n (35,), (36,), (37,), (38,), (39,), (40,), (41,)\n ]\n \n assert factor_dependencies[\"transition\"] == [\n (5, 6), (6, 7), (7, 8), (8, 9), (10, 11), (11, 12),\n (15, 16), (16, 17), (17, 18), (25, 26), (26, 27),\n (35, 36), (36, 37), (37, 38), (38, 39), (40, 41)\n ]\n \n assert factor_dependencies[\"skip\"] == [\n (5, 15), (6, 10), (35, 41)\n ]\n \n # Check the string values\n assert len(strings) == 9\n assert strings == [\n \"-DOCSTART-\",\n \"Peter Such won at London\",\n \"Such is happy\",\n \"Peter returned home today\",\n \"-DOCSTART-\",\n \"Beyoncé releases album\",\n \"-DOCSTART-\",\n \"Nook 's Cranny opens today\",\n \"Tom Nook\"\n ]" ]
[ "0.69626296", "0.6524709", "0.596794", "0.5951669", "0.5843335", "0.5840305", "0.57236236", "0.5602836", "0.5578917", "0.5575729", "0.55542487", "0.55491525", "0.551724", "0.5510782", "0.54982984", "0.5425961", "0.5419499", "0.5396649", "0.537947", "0.5354146", "0.5336732", "0.53342366", "0.5331055", "0.5322943", "0.53156525", "0.53041834", "0.5300964", "0.5296316", "0.5272292", "0.5269869" ]
0.6857753
1
Updates `self.type_to_idx` with mappings from word, char and tag types to unique int IDs.
def _get_idx_maps(self, types, initial_mapping=None):
    initial_mapping = constants.INITIAL_MAPPING if initial_mapping is None else initial_mapping
    # generate type to index mappings
    self.type_to_idx['word'] = Preprocessor.type_to_idx(types['word'], initial_mapping['word'])
    self.type_to_idx['char'] = Preprocessor.type_to_idx(types['char'], initial_mapping['word'])
    self.type_to_idx['tag'] = Preprocessor.type_to_idx(types['tag'], initial_mapping['tag'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_id2idx(self):\n self._id2idx = {}\n for n, cell in enumerate(self._cell_list):\n self._id2idx[cell.id()] = n", "def set_indices(self, part_instance_counts):\n type_indices = {}\n for entry in self._entries:\n try:\n entry.set_indices(\n model_type_index=type_indices.setdefault(entry.ENTRY_SUBTYPE, 0),\n instance_count=part_instance_counts.get(entry.name, 0),\n )\n except KeyError as e:\n raise SoulstructError(\n f\"Invalid map component name for {entry.ENTRY_SUBTYPE.name} model {entry.name}: {e}\"\n )\n else:\n type_indices[entry.ENTRY_SUBTYPE] += 1", "def cellannotation2ID(self, annotation_type):\n annotations = list(self.adata.obs[annotation_type])\n annotations_set = sorted(set(annotations))\n \n mapping = {a:idx for idx,a in enumerate(annotations_set)}\n \n truth_labels = [mapping[a] for a in annotations]\n self.adata.obs['label'] = pd.Categorical(values=truth_labels)\n #18m-unannotated\n # \n return mapping", "def upd_type_count(self, upd_type, ct_type):\n for hour in self.data_file.buckets:\n ct_type[hour] = Counter(pt['type'] for pt in self.data_file.buckets[hour])[upd_type]\n return ct_type", "def assign_index(self):\n\n i = 0\n for word in self.words:\n self.index[word] = i\n i += 1", "def add_type_index(self, sample):\n sample['item_type_index'] = types.get_index_of_type(sample['item_type'])", "def map_int_2_wordlist(word_list, vocab_list):\r\n word_to_index = {u:i for i, u in enumerate(vocab_list)}\r\n text_to_num = np.array([word_to_index[c] for c in word_list])\r\n return word_to_index", "def build_idx(vocab):\n word2index = {}\n index2word = {}\n\n word2index['PAD'] = 0\n index2word[0] = 'PAD'\n\n word2index['UNK'] = 1\n index2word[1] = 'UNK'\n\n for i,word in enumerate(vocab):\n word2index[word.lower()] = i+2\n index2word[i+2] = word.lower()\n\n return word2index, index2word", "def rearrange_indices(self):\n num_fixed_words = 2\n other_words = [w for w in self.word2index if w not in self.equivariant_words]\n for idx, word in enumerate(self.equivariant_words):\n w_idx = idx + num_fixed_words\n self.word2index[word] = w_idx\n self.index2word[w_idx] = word\n for idx, word in enumerate(sorted(other_words)):\n w_idx = idx + num_fixed_words + self.num_equivariant_words\n self.word2index[word] = w_idx\n self.index2word[w_idx] = word", "def get_idx_seq(self):\n for partition, filepath in self.directory.items():\n if filepath is not None:\n self.idx_seq[partition] = {\n 'word': Preprocessor.get_type_idx_sequence(self.type_seq[partition]['word'],\n self.type_to_idx['word'],\n type_='word'),\n 'char': Preprocessor.get_type_idx_sequence(self.type_seq[partition]['word'],\n self.type_to_idx['char'],\n type_='char'),\n 'tag': Preprocessor.get_type_idx_sequence(self.type_seq[partition]['tag'],\n self.type_to_idx['tag'],\n type_='tag'),\n }\n # one-hot encode our targets\n self.idx_seq[partition]['tag'] = to_categorical(self.idx_seq[partition]['tag'])", "def token_to_idx(self) -> Dict[Hashable, int]:\n return self._token_to_idx", "def make_idx2word():\n idx2word = {}\n d = train_data.shared['word2idx']\n for word, idx in d.items():\n print(word)\n idx2word[idx] = word\n if config.use_glove_for_unk:\n d2 = train_data.shared['new_word2idx']\n for word, idx in d2.items():\n print(word)\n idx2word[idx+len(d)] = word\n return idx2word", "def _init_prepare_types(self):\n # len(db)-1 wouldn't work here because there could be missing\n # index due to generic filtering\n self.types = {\n key: fit_integer_type(np.max(db.index.values), is_signed=False)\n for key, db in 
iteritems(self.by_dbs)}", "def map_wordlist_2_int(word_list, vocab_list):\r\n word_to_index = {u:i for i, u in enumerate(vocab_list)}\r\n text_to_num = np.array([word_to_index[c] for c in word_list])\r\n return text_to_num", "def update(self, iterable):\n for word in iterable:\n if word in self:\n self[word] = self[word] + 1\n self.tokens += 1\n else:\n self[word] = 1\n self.types += 1\n self.tokens += 1", "def _create_idx(self):\n self._idx = {}\n for idx, (L, M, N) in enumerate(self.modes):\n if L not in self._idx:\n self._idx[L] = {}\n if M not in self._idx[L]:\n self._idx[L][M] = {}\n self._idx[L][M][N] = idx", "def map_to_per_etype(self, ids): # -> tuple[Unknown, Unknown]:\n ...", "def map_to_per_etype(self, ids): # -> tuple[Unknown, Unknown]:\n ...", "def _terms_to_idxs(self,\n level: str,\n term_to_idx: Dict[str, int]\n ) -> None:\n if level == 'token':\n path_in = self.path_token_terms\n path_out = self.path_idx_token_terms\n elif level == 'lemma':\n path_in = self.path_lemma_terms\n path_out = self.path_idx_lemma_terms\n else:\n raise Exception('Error! Level not known.')\n\n terms = set()\n\n with open(path_in, 'r', encoding='utf8') as fin:\n for line in fin:\n terms.add(line.strip('\\n'))\n\n term_cmd = []\n with open(path_out, 'w', encoding='utf8') as fout:\n for t in terms:\n term_cmd.append(term_to_idx[t])\n fout.write(str(term_to_idx[t]) + '\\n')", "def create_class_indices(self) -> None:\n\n categories = list(np.loadtxt(self.path_manager.categories_file(), delimiter=\",\", dtype=str))\n\n if self.include_noise_samples and not self.multi_label_classification:\n categories.append(\"noise\")\n\n self.class_to_idx = {}\n\n for idx, class_name in enumerate(sorted(categories)):\n self.class_to_idx[class_name] = idx", "def map_to_per_etype(self, ids): # -> None:\n ...", "def list_to_idx(lst, typecode):\n magicnumber = _build_magic_number(lst, typecode)\n dimension_sizes = _build_dimension_sizes(lst)\n data = _build_data(lst, typecode)\n\n return magicnumber + dimension_sizes + data", "def collect_type_freqs(self):\n tf.logging.info('collecting type_freq, mapping type_id -> # instances')\n self.type_freq = collections.defaultdict(int)\n for f in self.frames():\n if self.instance_of in f:\n self.type_freq[f[self.instance_of].id] += 1\n # not sure why this is needed - do some things have Null id's?\n del self.type_freq[None]", "def update(self,haiku, typenum):\n self.occurrences += 1\n for i in range(2):\n for x in (haiku.triple[i]).wordarray:\n if (self.wordtype == dictionary.wordtype(x) and \n dictionary.word_filter(x) != self.word):\n self.update_adj_dict(x, i==typenum)", "def map_to_per_ntype(self, ids): # -> tuple[Unknown, Unknown]:\n ...", "def map_to_per_ntype(self, ids): # -> tuple[Unknown, Unknown]:\n ...", "def conv_word_to_indexed_txt(txt_vec):\n\n # transform words into integer indexes, comes out as n x m\n # where n = # txt doc, m = # unique words for whole universe\n vectorizer = CountVectorizer(\n stop_words=customised_stopword,\n analyzer='word'\n )\n # CountVectorizer(ngram_range=(1,2), analyzer='word')\n sparse_count_vec = vectorizer.fit_transform(txt_vec)\n\n # create n x p list of words represented by ints,\n # where p = # words in each documentx\n # written in such a convoluted way for speed optimization purposes\n x_vec, y_vec, count_vec = sparse.find(sparse_count_vec)\n\n # add in duplicates\n x_vec = np.repeat(x_vec, count_vec)\n y_vec = np.repeat(y_vec, count_vec)\n\n # convert to torch variables\n x_vec = torch.tensor(x_vec, dtype=torch.int32)\n y_vec = 
torch.tensor(y_vec, dtype=torch.float)\n\n # sort the vecs\n sort_ix = torch.argsort(x_vec)\n x_vec = x_vec[sort_ix]\n y_vec = y_vec[sort_ix]\n\n x_vec_bincount = torch.bincount(x_vec.cpu())\n bincount_tup = tuple(int(bincount) for bincount in x_vec_bincount)\n indexed_txt_list = list(torch.split(y_vec, bincount_tup))\n\n # the dictionary key to match each word to int\n vocab_dict = vectorizer.vocabulary_\n\n print(\"Converted words to indexes of integers.\")\n\n vocab_count = sparse_count_vec.data\n\n return indexed_txt_list, vocab_dict, vocab_count", "def _cat_to_idx(self, category):\n if not category in self.cat2x:\n return torch.LongTensor([self.categories])\n else:\n return torch.LongTensor([self.cat2x[category]])", "def doc2idx(self, document, unknown_word_index=-1):\n if isinstance(document, string_types):\n raise TypeError(\"doc2idx expects an array of unicode tokens on input, not a single string\")\n\n document = [word if isinstance(word, unicode) else unicode(word, 'utf-8') for word in document]\n return [self.token2id.get(word, unknown_word_index) for word in document]", "def map_ord_to_index(origin_char_list, save_path):\n ord_2_index_dict = {str(i) + '_index': str(ord(c)) for i, c in\n enumerate(CharDictBuilder._read_chars(origin_char_list))}\n index_2_ord_dict = {str(ord(c)) + '_ord': str(i) for i, c in\n enumerate(CharDictBuilder._read_chars(origin_char_list))}\n total_ord_map_index_dict = dict(ord_2_index_dict)\n total_ord_map_index_dict.update(index_2_ord_dict)\n CharDictBuilder._write_json(save_path, total_ord_map_index_dict)" ]
[ "0.6454551", "0.6133905", "0.6046303", "0.5929449", "0.59073", "0.58796215", "0.5660291", "0.56524235", "0.5624651", "0.5597669", "0.5569194", "0.55043894", "0.5499576", "0.54610884", "0.5451779", "0.541576", "0.5380984", "0.5380984", "0.53680414", "0.53286856", "0.5276909", "0.52721316", "0.517605", "0.5175608", "0.51723146", "0.51723146", "0.5169921", "0.5118406", "0.5101634", "0.50949746" ]
0.66872704
0
Updates `self.idx_seq` with the final representation of the data used for training: numpy arrays built by using `self.type_to_idx` to map all elements in `self.type_seq` to their corresponding integer IDs, for the train and, if provided, valid/test partitions found at `self.directory/train.`, `self.directory/valid.` and `self.directory/test.`.
def get_idx_seq(self):
    for partition, filepath in self.directory.items():
        if filepath is not None:
            self.idx_seq[partition] = {
                'word': Preprocessor.get_type_idx_sequence(self.type_seq[partition]['word'],
                                                           self.type_to_idx['word'],
                                                           type_='word'),
                'char': Preprocessor.get_type_idx_sequence(self.type_seq[partition]['word'],
                                                           self.type_to_idx['char'],
                                                           type_='char'),
                'tag': Preprocessor.get_type_idx_sequence(self.type_seq[partition]['tag'],
                                                          self.type_to_idx['tag'],
                                                          type_='tag'),
            }
            # one-hot encode our targets
            self.idx_seq[partition]['tag'] = to_categorical(self.idx_seq[partition]['tag'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_type_seq(self):\n for partition, filepath in self.directory.items():\n if filepath is not None:\n conll_file = os.path.basename(filepath) # get name of conll file\n\n # collect sequence data\n sents = list(self.conll_parser.sents(conll_file))\n tagged_sents = list(self.conll_parser.tagged_sents(conll_file))\n\n word_seq = Preprocessor.replace_rare_tokens(sents) if self.replace_rare_tokens else sents\n char_seq = [[[c for c in w] for w in s] for s in sents]\n tag_seq = [[t[-1] for t in s] for s in tagged_sents]\n\n # update the class attributes\n self.type_seq[partition] = {'word': word_seq, 'char': char_seq, 'tag': tag_seq}", "def make_idx_data(revs, word_idx_map, maxlen=60, is_split = True):\r\n X_train, X_trial, X_test,y_train, y_trial,y_test, lex_train, lex_trial = [], [], [], [], [], [], [], []\r\n for rev in revs:\r\n sent = get_idx_from_sent(rev['text'], word_idx_map)\r\n y = rev['y']\r\n if is_split:\r\n if rev['split'] == 1:\r\n X_train.append(sent)\r\n y_train.append(y)\r\n\r\n elif rev['split'] == -1:\r\n X_trial.append(sent)\r\n y_trial.append(y)\r\n else:\r\n X_test.append(sent)\r\n y_test.append(-1)\r\n\r\n if is_split:\r\n X_train = sequence.pad_sequences(np.array(X_train), maxlen=maxlen)\r\n X_trial = sequence.pad_sequences(np.array(X_trial), maxlen=maxlen)\r\n # X_valid = sequence.pad_sequences(np.array(X_valid), maxlen=maxlen)\r\n y_train = np_utils.to_categorical(np.array(y_train))\r\n y_trial = np_utils.to_categorical(np.array(y_trial))\r\n # y_valid = np.array(y_valid)\r\n\r\n lex_train = train_lexicon.values\r\n lex_trial = trial_lexicon.values\r\n lex_train = np.array(lex_train)\r\n lex_trial = np.array(lex_trial)\r\n return [X_train, X_trial, y_train, y_trial, lex_train, lex_trial]\r\n else:\r\n X_test = sequence.pad_sequences(np.array(X_test), maxlen=117)\r\n lex_test = test_lexicon.values\r\n lex_test = np.array(lex_test)\r\n return [X_test, lex_test]", "def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))", "def train(self):\r\n for class_ in set(self.train_classes):\r\n data = map(lambda (ind, datum): datum, filter(lambda (ind, datum): self.train_classes[ind] == class_, enumerate(self.train_data)))\r\n self.distribution.index_data(data, class_)", "def _seq2Indices(self, sequence, vocab, start, end, unknown):\n if start:\n sequence.insert(0, start)\n if end:\n sequence.append(end)\n return [vocab[token] if token in vocab else vocab[unknown] for token in sequence]", "def __init__(self, data_dir, seq_id):\n super().__init__()\n self.data_dir = data_dir\n self.seq_id = seq_id\n self.ys = np.load(data_dir + 
'samples.npy')[seq_id].reshape(-1, 1, 1)\n self.ys = cuda_move(torch.tensor(self.ys).float())", "def gen_data(self):\n\n # 1,read the source text\n inputs, labels = self.read_data()\n print(\"read finished\")\n\n # 2. word label index\n word_to_index, label_to_index = self.gen_word_label_index()\n # print(\"word_to_index\",word_to_index)\n # print(\"label_to_index\",label_to_index)\n print(\"vocab process finished\")\n #\n # 3,input to index\n inputs_idx = self.trans_to_index(inputs, word_to_index)\n # print((inputs_idx))\n print(\"index transform finished\")\n #\n # 4, padding\n inputs_idx = self.padding(inputs_idx, self._sequence_length)\n # print(inputs_idx)\n print(\"padding finished\")\n\n # 6,label to index\n labels_idx = self.trans_label_to_index(labels, label_to_index)\n print(\"label index transform finished\")\n\n return np.array(inputs_idx), np.array(labels_idx)", "def _yield_training_validation(self, batch_index):\n # print(f'requested batch with index: {batch_index}') # DEBUG\n num_identities = len(self.identities)\n num_ids_to_resample = 0\n # manage identities in a circular way \n ids_start = (batch_index*self.batch_size)%num_identities # identities' batch start\n ids_end = ((batch_index+1)*self.batch_size)%num_identities # identities' batch end\n # Manage the indetities array in a circular manner\n #batch_identities = self.identities[ids_start:ids_end] if ids_start < ids_end else self.identities[ids_start:].append(self.identities[:ids_end])\n if ids_start < ids_end:\n batch_identities = self.identities[ids_start:ids_end]\n else:\n batch_identities = self.identities[ids_start:]\n batch_identities.extend(self.identities[:ids_end])\n samples_batch = []\n labels_batch = []\n roi_batch = []\n for identity in batch_identities:\n identity_data = self.groundtruth_metadata[identity]\n # if there are images available for that identity\n if identity_data['index'] < len(identity_data['metadata']):\n # read the image and the necessary metadata\n img_info = identity_data['metadata'][identity_data['index']]\n img_path = os.path.join(self.dataset_root_path, img_info['path'])\n img = cv2.imread(img_path) # watch out for slashes (/)\n # if OpenCV is unable to read an image, it returns None\n if img is None:\n print('[DATA LOADER ERROR] cannot find image at path: ', img_path)\n # increase the index, in order to avoid this path when building subsequent batches with this identity\n identity_data['index'] += 1\n # sample another image from another identity to replace this one in the batch\n num_ids_to_resample += 1\n continue\n #batch.append(AgeEstimationSample(img, img_info['roi'], img_info['age'], 'BGR')) # cv2 reads as BGR\n img = img.astype('float32')\n samples_batch.append(img)\n labels_batch.append(img_info['age'])\n roi_batch.append(img_info['roi'])\n # increase the index, because another sample for that identity has been used\n identity_data['index'] += 1\n else:\n num_ids_to_resample += 1\n\n # if for some identities there weren't available images, take them from other identities\n # note that this mechanism solves also the problems arising when less than batch_size identities are available, by\n # picking multiple images from the available entities\n # the __len__ method in the data generator associated to this data loader is responsible for avoiding that this\n # method is called when less than batch_size \"fresh\" images are available\n last_taken_identity_index = ids_end \n num_samples_when_last_taken = num_ids_to_resample\n while(num_ids_to_resample > 0):\n identity = 
self.identities[ids_end] # remeber that slicing at previous step excludes upper limit\n identity_data = self.groundtruth_metadata[identity]\n if identity_data['index'] < len(identity_data['metadata']):\n last_taken_identity_index = ids_end\n num_samples_when_last_taken = num_ids_to_resample\n # read the image and the necessary metadata\n img_info = identity_data['metadata'][identity_data['index']]\n img_path = os.path.join(self.dataset_root_path, img_info['path'])\n img = cv2.imread(img_path) # watch out for slashes (/)\n # if the path does not exist or there are problems while reading the image\n if img is None:\n print('[DATA LOADER ERROR] cannot find image at path: ', img_path)\n # increase the index, in order to avoid this path when building subsequent batches with this identity\n identity_data['index'] += 1\n continue\n #batch.append(AgeEstimationSample(img, img_info['roi'], img_info['age'], 'BGR')) # cv2 reads as BGR\n img = img.astype('float32')\n samples_batch.append(img)\n labels_batch.append(img_info['age'])\n roi_batch.append(img_info['roi'])\n\n num_ids_to_resample -= 1\n identity_data['index'] += 1\n \n ids_end = ((ids_end+1)%num_identities)\n if ids_end == last_taken_identity_index and num_ids_to_resample == num_samples_when_last_taken and identity_data['index'] == len(identity_data['metadata']):\n raise Exception(f'No more images available, missing {num_ids_to_resample} images!')\n\n # cannot return numpy arrays since images in batch have different sizes\n return samples_batch, labels_batch, roi_batch\n #return batch", "def split_seq_data(X, y, subjects, video_lens, train_ids, val_ids, test_ids, delete_categories=[]):\n # construct a subjects data matrix offset\n X_feature_dim = X.shape[1]\n train_X = np.empty((0, X_feature_dim), dtype='float32')\n val_X = np.empty((0, X_feature_dim), dtype='float32')\n test_X = np.empty((0, X_feature_dim), dtype='float32')\n train_y = np.empty((0,), dtype='int')\n val_y = np.empty((0,), dtype='int')\n test_y = np.empty((0,), dtype='int')\n train_vidlens = np.empty((0,), dtype='int')\n val_vidlens = np.empty((0,), dtype='int')\n test_vidlens = np.empty((0,), dtype='int')\n train_subjects = np.empty((0,), dtype='int')\n val_subjects = np.empty((0,), dtype='int')\n test_subjects = np.empty((0,), dtype='int')\n previous_subject = 1\n subject_video_count = 0\n current_video_idx = 0\n current_data_idx = 0\n populate = False\n for idx, subject in enumerate(subjects):\n if previous_subject == subject: # accumulate\n subject_video_count += 1\n else: # populate the previous subject\n populate = True\n if idx == len(subjects) - 1: # check if it is the last entry, if so populate\n populate = True\n previous_subject = subject\n if populate:\n # slice the data into the respective splits\n end_video_idx = current_video_idx + subject_video_count\n subject_data_len = int(np.sum(video_lens[current_video_idx:end_video_idx]))\n end_data_idx = current_data_idx + subject_data_len\n if previous_subject in train_ids:\n train_X = np.concatenate((train_X, X[current_data_idx:end_data_idx]))\n train_y = np.concatenate((train_y, y[current_data_idx:end_data_idx]))\n train_vidlens = np.concatenate((train_vidlens, video_lens[current_video_idx:end_video_idx]))\n train_subjects = np.concatenate((train_subjects, subjects[current_video_idx:end_video_idx]))\n if previous_subject in val_ids:\n val_X = np.concatenate((val_X, X[current_data_idx:end_data_idx]))\n val_y = np.concatenate((val_y, y[current_data_idx:end_data_idx]))\n val_vidlens = np.concatenate((val_vidlens, 
video_lens[current_video_idx:end_video_idx]))\n val_subjects = np.concatenate((val_subjects, subjects[current_video_idx:end_video_idx]))\n if previous_subject in test_ids:\n test_X = np.concatenate((test_X, X[current_data_idx:end_data_idx]))\n test_y = np.concatenate((test_y, y[current_data_idx:end_data_idx]))\n test_vidlens = np.concatenate((test_vidlens, video_lens[current_video_idx:end_video_idx]))\n test_subjects = np.concatenate((test_subjects, subjects[current_video_idx:end_video_idx]))\n previous_subject = subject\n current_video_idx = end_video_idx\n current_data_idx = end_data_idx\n subject_video_count = 1\n populate = False\n if len(delete_categories) > 0:\n # Delete some categories from training set and valid set\n vid_idx = 0\n vid_upper_buffer = train_vidlens[0]\n data_idx = 0\n frame_count = 0\n idx = 0\n train_delete_idx = np.empty((0,), dtype=int)\n train_delete_vididx = np.empty((0,), dtype=int)\n while idx < train_y.shape[0]:\n if train_y[idx] not in delete_categories:\n frame_count += 1\n if frame_count >= vid_upper_buffer: # new video\n vid_idx += 1\n if vid_idx < train_vidlens.shape[0]:\n vid_upper_buffer += train_vidlens[vid_idx]\n idx += 1\n continue\n else:\n #print(\"deleted y: {}, pre: {}, aft: {}, idx:{} vid_idx:{}\".format(train_y[idx:idx+train_vidlens[vid_idx]], train_y[idx-1], train_y[idx+train_vidlens[vid_idx]], idx, vid_idx))\n train_delete_idx = np.concatenate((train_delete_idx, np.arange(idx, idx+train_vidlens[vid_idx])))\n train_delete_vididx = np.append(train_delete_vididx, vid_idx)\n idx += train_vidlens[vid_idx]\n frame_count += train_vidlens[vid_idx]\n vid_idx += 1\n if vid_idx < train_vidlens.shape[0]:\n vid_upper_buffer += train_vidlens[vid_idx]\n #print(\"total deleted items number:{}, target items number:{}, deleted videos:{}\".format(train_delete_idx.shape[0], np.where(train_y>=8)[0].shape[0], train_delete_vididx.shape[0]))\n train_X = np.delete(train_X, train_delete_idx, 0)\n train_y = np.delete(train_y, train_delete_idx)\n train_vidlens = np.delete(train_vidlens, train_delete_vididx)\n train_subjects = np.delete(train_subjects, train_delete_vididx)\n\n vid_idx = 0\n vid_upper_buffer = val_vidlens[0]\n data_idx = 0\n frame_count = 0\n idx = 0\n val_delete_idx = np.empty((0,), dtype=int)\n val_delete_vididx = np.empty((0,), dtype=int)\n while idx < val_y.shape[0]:\n if val_y[idx] not in delete_categories:\n frame_count += 1\n if frame_count >= vid_upper_buffer: # new video\n vid_idx += 1\n if vid_idx < val_vidlens.shape[0]:\n vid_upper_buffer += val_vidlens[vid_idx]\n idx += 1\n continue\n else:\n #print(\"deleted y: {}, pre: {}, aft: {}, idx:{} vid_idx:{}\".format(val_y[idx:idx+val_vidlens[vid_idx]], val_y[idx-1], val_y[idx+val_vidlens[vid_idx]], idx, vid_idx))\n val_delete_idx = np.concatenate((val_delete_idx, np.arange(idx, idx+val_vidlens[vid_idx])))\n val_delete_vididx = np.append(val_delete_vididx, vid_idx)\n idx += val_vidlens[vid_idx]\n frame_count += val_vidlens[vid_idx]\n vid_idx += 1\n if vid_idx < val_vidlens.shape[0]:\n vid_upper_buffer += val_vidlens[vid_idx]\n #print(\"total deleted items number:{}, target items number:{}, deleted videos:{}\".format(val_delete_idx.shape[0], np.where(val_y>=8)[0].shape[0], val_delete_vididx.shape[0]))\n val_X = np.delete(val_X, val_delete_idx, 0)\n val_y = np.delete(val_y, val_delete_idx)\n val_vidlens = np.delete(val_vidlens, val_delete_vididx)\n val_subjects = np.delete(val_subjects, val_delete_vididx)\n\n return train_X, train_y, train_vidlens, train_subjects, \\\n val_X, val_y, val_vidlens, 
val_subjects, \\\n test_X, test_y, test_vidlens, test_subjects", "def _get_idx_maps(self, types, initial_mapping=None):\n initial_mapping = constants.INITIAL_MAPPING if initial_mapping is None else initial_mapping\n # generate type to index mappings\n self.type_to_idx['word'] = Preprocessor.type_to_idx(types['word'], initial_mapping['word'])\n self.type_to_idx['char'] = Preprocessor.type_to_idx(types['char'], initial_mapping['word'])\n self.type_to_idx['tag'] = Preprocessor.type_to_idx(types['tag'], initial_mapping['tag'])", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def set_indices(self, part_instance_counts):\n type_indices = {}\n for entry in self._entries:\n try:\n entry.set_indices(\n model_type_index=type_indices.setdefault(entry.ENTRY_SUBTYPE, 0),\n instance_count=part_instance_counts.get(entry.name, 0),\n )\n except KeyError as e:\n raise SoulstructError(\n f\"Invalid map component name for {entry.ENTRY_SUBTYPE.name} model {entry.name}: {e}\"\n )\n else:\n type_indices[entry.ENTRY_SUBTYPE] += 1", "def __getitem__(self, index):\n \n input_batch = []\n output_batch = []\n for description in self.mapping[index]:\n if self.current_file_index == description[0]:\n input_batch.extend(self.current_input[description[1]: description[2]])\n output_batch.extend(self.current_output[description[1]: description[2]])\n \n else:\n self.current_input = None\n self.current_output = None\n if self.set_type == \"valid\":\n input_file = np.load(\"./data/rnn_input_car_racing_valid.npz\")[\"arr_0\"]\n output_file = np.load(\"./data/rnn_output_car_racing_valid.npz\")[\"arr_0\"]\n else:\n input_file = np.load(\"./data/rnn_input_car_racing_\" + str(description[0]) + \".npz\")[\"arr_0\"]\n output_file = np.load(\"./data/rnn_output_car_racing_\" + str(description[0]) + \".npz\")[\"arr_0\"]\n \n self.current_input = np.array([obs for obs in input_file])\n input_batch.extend(self.current_input[description[1]: description[2]])\n \n self.current_output = np.array([obs for obs in output_file])\n output_batch.extend(self.current_output[description[1]: description[2]])\n \n self.current_file_index = description[0]\n \n ret_input = np.array(input_batch)\n ret_output = np.array(output_batch)\n \n # print(ret_input.shape)\n \n return ret_input, ret_output", "def _load_ID_files(self):\n if self.mode in ['train_noval', 'train_with_val']:\n if not os.path.exists(self._trn_IDs_file) or not os.path.exists(self._val_IDs_file):\n return False\n\n with open(self._trn_IDs_file, 'r') as f:\n self._trn_IDs = f.readlines()\n self._trn_IDs = [tuple(ID.rstrip().split(\"###\")) for ID in self._trn_IDs]\n\n with open(self._val_IDs_file, 'r') as f:\n self._val_IDs = f.readlines()\n self._val_IDs = [tuple(ID.rstrip().split(\"###\")) for ID in self._val_IDs]\n\n self._img_trn_path = [(self._trn_dir + '/' + ID[0], self._trn_dir + '/' + ID[1]) for ID in self._trn_IDs]\n self._lbl_trn_path = [self._trn_lbl_dir + '/' + ID[2] for ID in self._trn_IDs]\n\n if self.mode == 'train_noval':\n # Train over the original training set (no validation split)\n self._trn_IDs += self._val_IDs\n for ID in self._val_IDs:\n self._img_trn_path.append((self._val_dir + '/' + ID[0], self._val_dir + '/' + ID[1]))\n self._lbl_trn_path.append(self._val_lbl_dir + '/' + ID[2])\n else:\n # Train over the training split, validate over the validation split\n self._img_val_path, self._lbl_val_path, self._pred_lbl_val_path = [], [], []\n for ID in self._val_IDs:\n self._img_val_path.append((self._val_dir + '/' + ID[0], 
self._val_dir + '/' + ID[1]))\n self._lbl_val_path.append(self._val_lbl_dir + '/' + ID[2])\n lbl_id = ID[2].replace('.pfm', '.flo').replace('.png', '.flo')\n self._pred_lbl_val_path.append(self._val_pred_lbl_dir + '/' + lbl_id)\n\n if self.opts['tb_test_imgs'] is True:\n # Make test images available to model in training mode\n if not os.path.exists(self._tst_IDs_file):\n return False\n\n with open(self._tst_IDs_file, 'r') as f:\n self._tst_IDs = f.readlines()\n self._tst_IDs = [tuple(ID.rstrip().split(\"###\")) for ID in self._tst_IDs]\n\n self._img_tst_path, self._pred_lbl_tst_path = [], []\n for ID in self._tst_IDs:\n self._img_tst_path.append((self._tst_dir + '/' + ID[0], self._tst_dir + '/' + ID[1]))\n self._pred_lbl_tst_path.append(self._tst_pred_lbl_dir + '/' + ID[2])\n\n elif self.mode in ['val', 'val_notrain']:\n # Validate over the validation split\n if not os.path.exists(self._val_IDs_file):\n return False\n\n with open(self._val_IDs_file, 'r') as f:\n self._val_IDs = f.readlines()\n self._val_IDs = [tuple(ID.rstrip().split(\"###\")) for ID in self._val_IDs]\n\n if self.mode == 'val_notrain':\n with open(self._trn_IDs_file, 'r') as f:\n self._trn_IDs = f.readlines()\n self._trn_IDs = [tuple(ID.rstrip().split(\"###\")) for ID in self._trn_IDs]\n self._val_IDs += self._trn_IDs\n\n self._img_val_path, self._lbl_val_path, self._pred_lbl_val_path = [], [], []\n for ID in self._val_IDs:\n self._img_val_path.append((self._val_dir + '/' + ID[0], self._val_dir + '/' + ID[1]))\n self._lbl_val_path.append(self._val_lbl_dir + '/' + ID[2])\n lbl_id = ID[2].replace('.pfm', '.flo').replace('.png', '.flo')\n self._pred_lbl_val_path.append(self._val_pred_lbl_dir + '/' + lbl_id)\n\n else:\n # Test over the entire testing set\n if not os.path.exists(self._tst_IDs_file):\n return False\n\n with open(self._tst_IDs_file, 'r') as f:\n self._tst_IDs = f.readlines()\n self._tst_IDs = [tuple(ID.rstrip().split(\"###\")) for ID in self._tst_IDs]\n\n self._img_tst_path, self._pred_lbl_tst_path = [], []\n for ID in self._tst_IDs:\n self._img_tst_path.append((self._tst_dir + '/' + ID[0], self._tst_dir + '/' + ID[1]))\n self._pred_lbl_tst_path.append(self._tst_pred_lbl_dir + '/' + ID[2])\n\n # Build a list of simplified IDs for Tensorboard logging\n if self._trn_IDs is not None:\n self._trn_IDs_simpl = self.simplify_IDs(self._trn_IDs)\n if self._val_IDs is not None:\n self._val_IDs_simpl = self.simplify_IDs(self._val_IDs)\n if self._tst_IDs is not None:\n self._tst_IDs_simpl = self.simplify_IDs(self._tst_IDs)\n\n if _DBG_TRAIN_VAL_TEST_SETS != -1: # Debug mode only\n if self._trn_IDs is not None:\n self._trn_IDs = self._trn_IDs[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._img_trn_path is not None:\n self._img_trn_path = self._img_trn_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._lbl_trn_path is not None:\n self._lbl_trn_path = self._lbl_trn_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._val_IDs is not None:\n self._val_IDs = self._val_IDs[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._img_val_path is not None:\n self._img_val_path = self._img_val_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._lbl_val_path is not None:\n self._lbl_val_path = self._lbl_val_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._pred_lbl_val_path is not None:\n self._pred_lbl_val_path = self._pred_lbl_val_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._tst_IDs is not None:\n self._tst_IDs = self._tst_IDs[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._img_tst_path is not None:\n self._img_tst_path = self._img_tst_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._pred_lbl_tst_path is 
not None:\n self._pred_lbl_tst_path = self._pred_lbl_tst_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n\n return True", "def __getitem__(self, idx):\n index = lambda x: x[idx * self.batch_size:(idx + 1) * self.batch_size]\n batch_questions = index(self.questions_shuffled) # Shape (batch, seqlen)\n batch_questions_dep = index(self.questions_dep_shuffled) #Shape (batch, seqlen)\n batch_questions_dep_mask = index(self.questions_dep_mask_shuffled) #Shape (batch, seqlen)\n batch_pos_paths = index(self.pos_paths_shuffled) # Shape (batch, seqlen)\n batch_pos_paths_words = index(self.pos_paths_words_shuffled) # Shape (batch, seqlen)\n batch_neg_paths = index(self.neg_paths_shuffled) # Shape (batch, seqlen)\n batch_neg_paths_words = index(self.neg_paths_words_shuffled) # Shape (batch, seqlen)\n if self.schema != 'default':\n batch_neg_paths_rel1 = index(self.neg_paths_rel1_shuffled) # Shape (batch, seqlen)\n batch_neg_paths_rel2 = index(self.neg_paths_rel2_shuffled) # Shape (batch, seqlen)\n batch_neg_paths_rel3 = index(self.neg_paths_rel3_shuffled) # Shape (batch, seqlen)\n batch_neg_paths_rel4 = index(self.neg_paths_rel4_shuffled) # Shape (batch, seqlen)\n batch_pos_paths_rel1 = index(self.pos_paths_rel1_shuffled) # Shape (batch, seqlen)\n batch_pos_paths_rel2 = index(self.pos_paths_rel2_shuffled) # Shape (batch, seqlen)\n batch_pos_paths_rel3 = index(self.pos_paths_rel3_shuffled) # Shape (batch, seqlen)\n batch_pos_paths_rel4 = index(self.pos_paths_rel4_shuffled) # Shape (batch, seqlen)\n\n if self.schema == 'default':\n return ([batch_questions, batch_questions_dep, batch_questions_dep_mask,\n batch_pos_paths, batch_neg_paths,\n batch_pos_paths_words,batch_neg_paths_words], self.dummy_y)\n else:\n return ([batch_questions, batch_questions_dep, batch_questions_dep_mask,\n batch_pos_paths,\n batch_pos_paths_rel1,batch_pos_paths_rel2,batch_pos_paths_rel3,batch_pos_paths_rel4,\n batch_neg_paths,\n batch_neg_paths_rel1,batch_neg_paths_rel2,batch_neg_paths_rel3,batch_neg_paths_rel4,\n batch_pos_paths_words,batch_neg_paths_words], self.dummy_y)", "def objects_to_index_matrix(\n self, object_seq_seq: Sequence[Sequence[Any]]) -> np.ndarray:\n row_length = max([len(seq) for seq in object_seq_seq])\n res = np.zeros((len(object_seq_seq), row_length))\n for i, object_seq in enumerate(object_seq_seq):\n for j, obj in enumerate(object_seq):\n if obj in self.obj_to_idx:\n res[i][j] = self.obj_to_idx[obj]\n else:\n res[i][j] = self.start-1\n return res", "def load_data_withIdx(subjIdx_list, filenames, test_ratio = 0.2):\n dataset_path = filenames['data']\n train_path = filenames['train']\n test_path = filenames['test']\n eval_path = filenames['eval']\n raw_image_lists = []\n aligned_image_lists = []\n print('[INFO] Data Processing started ... ')\n\n for subIdx in subjIdx_list:\n subj_path = dataset_path+'/subj_'+f'{subIdx:02d}'\n print('\\tProccessing images for subject_' +f'{subIdx:02d}'+ ' ...', end =\" \")\n\n if(not os.path.isdir(subj_path)):\n print(' no file exists for subject_' +f'{subIdx:02d}'+ ' !!')\n continue\n\n raw_list = sorted(glob.glob(subj_path + '/raw_images/*.jpg'))\n aligned_list = sorted(glob.glob(subj_path + '/aligned_images/*.jpg')) \n\n assert len(raw_list) == len(aligned_list) , \"data size mismatch! 
raw_img:{0}, alig_img:{1}\".format(len(raw_list) ,len(aligned_list)) \n \n raw_image_lists += raw_list\n aligned_image_lists += aligned_list\n print(' Done!')\n\n data_to_write_train = train_test_split_data(aligned_image_lists, raw_image_lists, test_ratio)\n data_to_write_test = train_test_split_data(data_to_write_train[1], data_to_write_train[3], 0.5)\n print('[INFO] Processing Done! ')\n print(\"[INFO] Number of train data :{0:4d}, Number of eval data :{1:4d}, Number of test data :{2:4d} \"\n . format(len(data_to_write_train[0]), len(data_to_write_test[0]), len(data_to_write_test[1]))) \n # data write part \n write_data(data_to_write_train, data_to_write_test, train_path, eval_path ,test_path)", "def __getitem__(self, key):\n if not isinstance(key, int):\n raise TypeError\n if key < 0 or key >= len(self.data):\n raise IndexError\n batch = self.data[key]\n batch_size = len(batch)\n batch = list(zip(*batch))\n assert len(batch) == 6\n\n # orig_idx = lens\n token_ids = np.array(seq_padding(batch[0], self.max_len))\n s_start, s_end = np.array(batch[1]), np.array(batch[2])\n o_labels = np.array(batch[3])\n distance_to_s = np.array(seq_padding(batch[4], self.max_len))\n mask = np.array(seq_padding(batch[5], self.max_len))\n\n # print(token_ids, s_start, s_end, o_labels)\n\n return (token_ids, distance_to_s, s_start, s_end, o_labels, mask)", "def shuffle_train(self):\r\n if self.data_container.task == 'Classify':\r\n id_train_list=[]\r\n for i in self.idx_train_list:\r\n id_train_list.append(self._random_state.choice(i,self.train_parms[0]))\r\n for j in self._random_state.choice(self.unique_value, self.train_parms[1]):\r\n id_train_list.append(self._random_state.choice(self.idx_train_list[j],1))\r\n self.idx['train'] = np.concatenate(id_train_list, axis=0)\r\n \r\n self.idx['train'] = self._random_state.permutation(self.idx['train'])", "def load(self):\n if self.directory is None:\n err_msg = \"`Dataset.directory` is None; must be provided before call to `Dataset.load`\"\n LOGGER.error('ValueError %s', err_msg)\n raise ValueError(err_msg)\n\n # unique words, chars and tags from CoNLL formatted dataset\n types = self._get_types()\n # map each word, char, and tag type to a unique integer\n self._get_idx_maps(types)\n\n # get word, char, and tag sequences from CoNLL formatted dataset\n self._get_type_seq()\n # get final representation used for training\n self.get_idx_seq()\n\n # useful during prediction / annotation\n self.idx_to_tag = generic_utils.reverse_dict(self.type_to_idx['tag'])", "def update_id2idx(self):\n self._id2idx = {}\n for n, cell in enumerate(self._cell_list):\n self._id2idx[cell.id()] = n", "def make_idx_data(revs, word_idx_map, maxlen=60):\n X_train, X_test, X_dev, y_train, y_dev,= [], [], [], [], []\n for rev in revs:\n sent = get_idx_from_sent(rev['text'], word_idx_map)\n y = rev['y']\n if rev['split'] == 1:\n X_train.append(sent)\n y_train.append(y)\n elif rev['split'] == 0:\n X_dev.append(sent)\n y_dev.append(y)\n elif rev['split'] == -1:\n X_test.append(sent)\n\n X_train = sequence.pad_sequences(np.array(X_train), maxlen=maxlen)\n X_dev = sequence.pad_sequences(np.array(X_dev), maxlen=maxlen)\n X_test = sequence.pad_sequences(np.array(X_test), maxlen=maxlen)\n y_train = np_utils.to_categorical(np.array(y_train))\n y_dev = np_utils.to_categorical(np.array(y_dev))\n\n return [X_train, X_test, X_dev, y_train, y_dev,]", "def step(self):\n\n if self._is_training:\n self._pos = self.training_pos.item()\n\n if self._params.dataset_config == DatasetConfig.TRAIN_ONLY:\n 
sample_range = len(self._train_label_indexes)\n self._is_training = True\n elif self._params.dataset_config == DatasetConfig.TEST_ONLY:\n sample_range = len(self._test_label_indexes)\n self._is_training = False\n else:\n total_dataset_size = len(self._train_label_indexes) + len(self._test_label_indexes)\n self._is_training = self._presented % total_dataset_size < len(self._train_label_indexes)\n switch_from_training_to_testing = self._presented % total_dataset_size == len(self._train_label_indexes)\n switch_from_testing_to_training = self._presented % total_dataset_size == 0\n if switch_from_training_to_testing or switch_from_testing_to_training:\n self._pos = -1 # needed for sequential order\n if self._is_training:\n sample_range = len(self._train_label_indexes)\n else:\n sample_range = len(self._test_label_indexes)\n\n self._presented += 1\n\n if self._params.random_order:\n self._pos = self._random.randint(low=0, high=sample_range)\n if self._is_location_filtering():\n self._pos = self._filter_location_random_position(self._is_training)\n else: # sequential order\n self._pos = (self._pos + 1) % sample_range\n if self._is_location_filtering():\n skipped_beginning, self._skip_next_step, self._pos = self._filter_location_sequential(self._is_training)\n self._presented += skipped_beginning\n\n self._copy_to_outputs_from(self._pos, self._is_training)\n\n self._pos += self._skip_next_step\n self._presented += self._skip_next_step\n self._skip_next_step = 0\n\n # write the training pos to tensor\n if self._is_training:\n self.training_pos[0] = self._pos", "def train(self):\n acc_time = []\n data_test = self.val_data[0][0][0]\n labels_test = self.val_data[0][0][1]\n for i, train_batch in enumerate(self.dataset):\n \n writerDIM = SummaryWriter('runs/experiment_DIM'+str(i))\n data,labels, t = train_batch\n\n index_tr,index_cv,coreset = data_split(data.shape[0],777)\n\n # adding eventual replay patterns to the current batch\n if i == 0:\n ext_mem = [data[coreset], labels[coreset]]\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n else:\n dataP = ext_mem[0]\n labP = ext_mem[1]\n\n ext_mem = [\n np.concatenate((data[coreset], ext_mem[0])),\n np.concatenate((labels[coreset], ext_mem[1]))]\n if self.replay:\n dataC = np.concatenate((data[index_tr], data[index_cv],dataP),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv],labP),axis=0)\n else:\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n\n\n\n print(\"----------- batch {0} -------------\".format(i))\n print(\"Task Label: \", t)\n trC,cvC = data_split_Tr_CV(dataC.shape[0],777)\n\n train_set = LoadDataset(dataC,labC,transform=self.tr,indices=trC)\n val_set = LoadDataset(dataC,labC,transform=self.tr,indices=cvC)\n print('Training set: {0} \\nValidation Set {1}'.format(train_set.__len__(),val_set.__len__()))\n batch_size=32\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaders = {'train':train_loader,'val':valid_loader}\n \n ####### Set hyperparameters for the training\n if i ==0: \n prior = False\n ep=40\n dim_model = DIM_model(batch_s=32,num_classes =128,feature=True) \n dim_model.to(self.device)\n classifierM = _classifier(n_input=128,n_class=50,n_neurons=[256,256,128])\n classifierM = classifierM.to(self.device)\n writer = 
SummaryWriter('runs/experiment_C'+str(i))\n lr_new = 0.00001\n lrC=0.0001\n \n else:\n prior = True\n ep=6\n \n lr_new =0.000005\n lrC = 0.00005\n\n optimizer = torch.optim.Adam(dim_model.parameters(),lr=lr_new)\n scheduler = lr_scheduler.StepLR(optimizer,step_size=40,gamma=0.1) #there is also MultiStepLR\n\n tr_dict_enc = {'ep':ep,'writer':writerDIM,'best_loss':1e10,'t_board':True,\n 'gamma':.5,'beta':.5,'Prior_Flag':prior,'discriminator':classifierM} \n tr_dict_cl = {'ep':40,'writer':writer,'best_loss':1e10,'t_board':True,'gamma':1}#40\n\n if i==0 and self.load:\n print('Load DIM model weights first step')\n dim_model.load_state_dict(torch.load(self.path + 'weights/weightsDIM_T0.pt'))\n else:\n ############################## Train Encoder########################################\n dim_model,self.stats = trainEnc_MI(self.stats,dim_model, optimizer, scheduler,dataloaders,self.device,tr_dict_enc)\n ####################################################################################\n if i==0:\n torch.save(dim_model.state_dict(), self.path + 'weights/weightsDIM_T'+str(i)+'.pt')\n\n ####\n #Conversion of image into latent space representation for classifier training\n ####\n dim_model.requires_grad_(False)\n for phase in ['train','val']:\n dataF = None\n labF = None\n for inputs, labels in dataloaders[phase]:\n torch.cuda.empty_cache()\n if len(inputs.shape)==5:\n\n inputs = inputs[:,:,:,:,0].to(self.device)\n else:\n inputs = inputs.to(self.device)\n\n _,_,pred = dim_model(inputs)\n pred_l = pred.data.cpu().numpy()\n if dataF is None:\n dataF = pred_l\n labF = labels.data.cpu().numpy()\n else:\n dataF = np.concatenate((dataF,pred_l),axis=0)\n labF = np.concatenate((labF,labels.data.cpu().numpy()),axis=0)\n\n if phase == 'train':\n dataTr_f = dataF\n labTr_f = labF\n else:\n dataCv_f = dataF\n labCv_f = labF\n \n dim_model.requires_grad_(True)\n train_set = LoadFeat(dataTr_f,labTr_f)\n val_set = LoadFeat(dataCv_f,labCv_f)\n batch_size=32\n\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaderC = {'train':train_loader,'val':valid_loader}\n\n optimizerC = torch.optim.Adam(classifierM.parameters(),lr=lrC)\n schedulerC = lr_scheduler.StepLR(optimizerC,step_size=40,gamma=0.1)\n classifierM.requires_grad_(True)\n\n ############################## Train Classifier ########################################\n classifierM,self.stats = train_classifier(self.stats,classifierM, optimizerC, schedulerC,dataloaderC,self.device,tr_dict_cl) \n #################################### #################################### ##############\n\n torch.save(classifierM.state_dict(), self.path + 'weights/weightsC_T'+str(i)+'.pt')\n dim_model.eval()\n classifierM.eval()\n #### Cross_val Testing\n \n test_set = LoadDataset(data_test,labels_test,transform=self.trT)\n batch_size=32\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)\n score= []\n\n for inputs, labels in test_loader:\n torch.cuda.empty_cache()\n inputs = inputs.to(self.device)\n labels = labels.to(self.device) \n _,_,ww =dim_model(inputs)\n pred = classifierM(ww)\n pred_l = pred.data.cpu().numpy()\n score.append(np.sum(np.argmax(pred_l,axis=1)==labels.data.cpu().numpy())/pred_l.shape[0])\n print('TEST PERFORMANCES:', np.asarray(score).mean())\n acc_time.append(np.asarray(score).mean())\n del test_set,test_loader\n \n self.dim_model = dim_model\n self.classifierM = classifierM\n acc_time = np.asarray(acc_time)\n return 
self.stats,acc_time", "def _create_examples(self, lines, set_type):\n # Parallelizing a bit batch computation because it is quite slow...\n #lines = lines[:500]\n step = 18 # 17 sentences per input sequence\n #encoded_dict = self.tokenizer.encode('[CLS] ' + ' [SEP] [CLS] '.join(lines) + ' [SEP]')\n #tokens = np.array(encoded_dict.tokens)\n #ids = np.array(encoded_dict.ids)\n \n n = len(lines)\n \n def f(i, sequence):\n guid = \"%s-%s\" % (set_type, i)\n text_a = self.pad_to_max_length([2] + self.mask_tokens(sequence) + [3])\n text_b = [0 if item==0 else 1 for item in text_a]\n label = self.pad_to_max_length([2] + sequence + [3])\n label = [label[i] if item==4 else -100 for i, item in enumerate(text_a)] # for loss computation, only taking into account MASK tokens with id==4\n example = InputExample(guid=guid,text_a=text_a,text_b=text_b,label=label)\n return example\n \n def g(i, line):\n sequence = self.tokenizer.encode(' '.join(line)).ids\n return f(i, sequence)\n \n # Splitting data for memory issues...\n indexes = list(range(0, n, step))\n m = len(indexes)\n n_splits = self.n_splits\n splits = [indexes[i*m//n_splits: m*(i+1)//n_splits] for i in range(n_splits)]\n for index_split, split in enumerate(splits):\n print(f\"Computing split {index_split+1} / {n_splits}... Split size: {len(split)}\")\n examples = Parallel(n_jobs=-1)(delayed(g)(index+split[0], lines[i:i + step]) for index, i in tqdm(enumerate(split)))\n self.save_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl'), examples)\n # Merging\n #examples = [self.load_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl')) for index_split in range(n_splits)]\n #examples = [item for l in examples for item in l]\n #self.save_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples.pkl'), examples)\n \n examples_paths = [os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl') for index_split in range(n_splits)]\n \n return examples_paths", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def __getitem__(self, index):\n training_data_structure = {\n 'session1': list(range(1, 12 + 1)),\n 'session2': list(range(1, 9 + 1)),\n 'session3': list(range(1, 9 + 1)),\n }\n\n # 30 x 3 x 32 x 64\n def random_si_idx():\n if self.is_train_data:\n si_idx = np.random.choice(self.train_subjects)\n labels = self.train_subjects.index(si_idx)\n else:\n\n si_idx = np.random.choice(self.test_subjects)\n labels = self.test_subjects.index(si_idx)\n return si_idx, labels\n\n def random_vi_idx(si):\n\n if si in list(range(1, 147 + 1)):\n if si in [1, 2, 4, 7, 8, 12, 13, 17, 
31, 40, 48, 77]:\n reading_dir = random.choice(['session1', 'session3'])\n else:\n reading_dir = 'session1'\n else:\n reading_dir = 'session2'\n\n vi_idx = np.random.choice(training_data_structure[reading_dir])\n\n return reading_dir, vi_idx\n\n def random_length(dirt, length):\n files = sorted(os.listdir(dirt))\n num = len(files)\n if num - length < 2:\n return None\n start = np.random.randint(1, num - length)\n end = start + length\n return files[start:end]\n\n def read_frames(frames_pth, file_names):\n # frames = np.zeros(self.im_shape, np.float32)\n frames = []\n for f in file_names:\n frame = np.asarray(Image.open(os.path.join(frames_pth, f)))\n frame = self.transform(frame)\n frames.append(frame)\n frames = torch.stack(frames)\n return frames\n\n si, labels = random_si_idx()\n session_dir1, vi1 = random_vi_idx(si)\n session_dir2, vi2 = random_vi_idx(si)\n frames_pth1 = os.path.join(self.data_root, session_dir1, '%03d_%02d' % (si, vi1))\n frames_pth2 = os.path.join(self.data_root, session_dir2, '%03d_%02d' % (si, vi2))\n file_names1 = random_length(frames_pth1, self.clip_len)\n file_names2 = random_length(frames_pth2, self.clip_len)\n\n while True:\n if file_names1 == None or file_names2 == None:\n session_dir1, vi1 = random_vi_idx(si)\n session_dir2, vi2 = random_vi_idx(si)\n frames_pth1 = os.path.join(self.data_root, session_dir1, '%03d_%02d' % (si, vi1))\n frames_pth2 = os.path.join(self.data_root, session_dir2, '%03d_%02d' % (si, vi2))\n file_names1 = random_length(frames_pth1, self.clip_len)\n file_names2 = random_length(frames_pth2, self.clip_len)\n else:\n break\n\n data1 = read_frames(frames_pth1, file_names1)\n data2 = read_frames(frames_pth2, file_names2)\n\n return data1, data2, labels", "def validation_step(self, batch, batch_idx):\n\n src_batch, trg_batch = batch\n\n src_seq = src_batch[\"src_ids\"]\n # change from [batch, seq_len] -> to [seq_len, batch]\n src_seq = src_seq.transpose(0, 1)\n src_lengths = src_batch[\"src_lengths\"]\n\n trg_seq = trg_batch[\"trg_ids\"]\n # change from [batch, seq_len] -> to [seq_len, batch]\n trg_seq = trg_seq.transpose(0, 1)\n trg_lengths = trg_batch[\"trg_lengths\"]\n\n outputs = self.forward(src_seq, src_lengths, trg_seq, 0)\n\n # # without sos token at the beginning and eos token at the end\n logits = outputs[1:].view(-1, self.output_dim)\n\n # trg = trg_seq[1:].view(-1)\n\n trg = trg_seq[1:].reshape(-1)\n\n # trg = [(trg len - 1) * batch size]\n # output = [(trg len - 1) * batch size, output dim]\n\n loss = self.loss(logits, trg)\n\n # take without first sos token, and reduce by 2 dimension, take index of max logits (make prediction)\n # seq_len * batch size * vocab_size -> seq_len * batch_size\n\n pred_seq = outputs[1:].argmax(2)\n\n # change layout: seq_len * batch_size -> batch_size * seq_len\n pred_seq = pred_seq.T\n\n # change layout: seq_len * batch_size -> batch_size * seq_len\n trg_batch = trg_seq[1:].T\n\n # compere list of predicted ids for all sequences in a batch to targets\n acc = plfunc.accuracy(pred_seq.reshape(-1), trg_batch.reshape(-1))\n\n # need to cast to list of predicted sequences (as list of token ids) [ [seq1_tok1, seq1_tok2, ...seq1_tokN],..., [seqK_tok1, seqK_tok2, ...seqK_tokZ]]\n predicted_ids = pred_seq.tolist()\n\n # need to add additional dim to each target reference sequence in order to\n # convert to format needed by bleu_score function [ seq1=[ [reference1], [reference2] ], seq2=[ [reference1] ] ]\n target_ids = torch.unsqueeze(trg_batch, 1).tolist()\n\n # bleu score needs two arguments\n # first: 
predicted_ids - list of predicted sequences as a list of predicted ids\n # second: target_ids - list of references (can be many, list)\n bleu_score = plfunc.nlp.bleu_score(predicted_ids, target_ids, n_gram=3).to(\n self.device\n ) # torch.unsqueeze(trg_batchT,1).tolist())\n\n self.log(\n \"val_loss\",\n loss,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n logger=True,\n sync_dist=True,\n )\n self.log(\n \"val_acc\",\n acc,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n logger=True,\n sync_dist=True,\n )\n self.log(\n \"val_bleu_idx\",\n bleu_score,\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n logger=True,\n sync_dist=True,\n )\n\n return loss, acc, bleu_score", "def __getitem__(self, index):\n assert index < self.numBatches, \"%d > %d\" % (index, self.numBatches)\n lengths,max_len = self.getLengths(index )\n def wrap(b,l ):\n #batch, len, feature\n if b is None:\n return b\n b = torch.stack(b, 0).transpose(0,1).contiguous()\n if self.cuda:\n b = b.cuda()\n packed = pack(b,list(l))\n return packed\n\n def wrap_align(b,l ):\n #batch, len_tgt, len_src\n if b is None:\n return b\n b = torch.stack(b, 0).transpose(0,1).contiguous().float()\n if self.cuda:\n b = b.cuda()\n packed = pack(b,list(l))\n return packed\n\n idsBatch = self.example_ids[index*self.batchSize:(index+1)*self.batchSize]\n # prep a tensor with fixed max_len of this batch\n srcBatch = self._batchify_src(\n self.src[index*self.batchSize:(index+1)*self.batchSize],max_len)\n\n # batch_size x word_len x char_len\n src_charBatch= self._batchify_src_char(\n self.src_char[index*self.batchSize: (index+1)*self.batchSize], max_len)\n\n if self.source_only:\n src_sourceBatch = self.src_source[index*self.batchSize:(index+1)*self.batchSize]\n\n batch = zip(idsBatch, srcBatch, src_charBatch, src_sourceBatch)\n order_data = sorted(list(enumerate(list(zip(batch, lengths)))),key = lambda x:-x[1][1])\n order,data = zip(*order_data)\n batch, lengths = zip(*data)\n idsBatch, srcBatch, src_charBatch, src_sourceBatch = zip(*batch)\n return order,idsBatch, wrap(srcBatch,lengths), wrap(src_charBatch, lengths), src_sourceBatch\n\n else:\n # batch input data for amr\n tgtBatch = self._batchify_tgt(\n self.tgt[index*self.batchSize:(index+1)*self.batchSize],max_len)\n # batch input for alignment from align_index\n alignBatch = self._batchify_align(\n self.align_index[index*self.batchSize:(index+1)*self.batchSize],max_len)\n\n rel_seq_pre = self.rel_seq[index*self.batchSize:(index+1)*self.batchSize]\n rel_index_pre = self.rel_index[index*self.batchSize:(index+1)*self.batchSize]\n rel_role_pre = self.rel_mat[index*self.batchSize:(index+1)*self.batchSize]\n\n roots =self.root[index*self.batchSize:(index+1)*self.batchSize]\n\n src_sourceBatch = self.src_source[index*self.batchSize:(index+1)*self.batchSize]\n tgt_sourceBatch = self.tgt_source[index*self.batchSize:(index+1)*self.batchSize]\n sourceBatch = [ src_s +tgt_s for src_s,tgt_s in zip(src_sourceBatch,tgt_sourceBatch)]\n # within batch sorting by decreasing length for variable length rnns\n indices = range(len(srcBatch))\n\n batch = zip(indices, idsBatch, srcBatch, src_charBatch, tgtBatch,alignBatch,rel_seq_pre,rel_index_pre,rel_role_pre,sourceBatch,roots)\n order_data = sorted(list(enumerate(list(zip(batch, lengths)))),key = lambda x:-x[1][1])\n order,data = zip(*order_data)\n batch, lengths = zip(*data)\n indices, idsBatch, srcBatch,src_charBatch, tgtBatch,alignBatch ,rel_seq_pre,rel_index_pre,rel_role_pre,sourceBatch,roots= zip(*batch)\n\n rel_batch,rel_index_batch,rel_lengths = 
self._batchify_rel_concept(rel_seq_pre,rel_index_pre)\n rel_roles,length_squares = self._batchify_rel_roles(rel_role_pre)\n\n\n return order,idsBatch, wrap(srcBatch,lengths), wrap(src_charBatch, lengths), wrap(tgtBatch,lengths), wrap_align(alignBatch,lengths),\\\n MyPackedSequence(rel_batch,rel_lengths),rel_index_batch,MyPackedSequence(rel_roles,length_squares),roots,sourceBatch", "def _index(self, corpus):\n\n # Transform documents to embeddings vectors\n ids, dimensions, stream = self.embedder.model.index(corpus)\n\n # Load streamed embeddings back to memory\n embeddings = np.empty((len(ids), dimensions), dtype=np.float32)\n with open(stream, \"rb\") as queue:\n for x in range(embeddings.shape[0]):\n embeddings[x] = pickle.load(queue)\n\n # Remove temporary file\n os.remove(stream)\n\n all_text = []\n for para_id, text, _ in corpus:\n all_text.append([text, para_id])\n\n df = pd.DataFrame(all_text, columns=[\"text\", \"paragraph_id\"])\n\n embedding_path = os.path.join(\n self.index_path, self.embed_paths[\"embeddings\"])\n dataframe_path = os.path.join(\n self.index_path, self.embed_paths[\"dataframe\"])\n ids_path = os.path.join(self.index_path, self.embed_paths[\"ids\"])\n\n # Load new data\n if os.path.isfile(embedding_path) and (self.encoder_args[\"overwrite\"] is False):\n logger.info(f\"Loading new data from {embedding_path}\")\n\n # Load existing embeddings\n old_embeddings = np.load(embedding_path) # LOAD EMBEDDINGS\n # Remove embeddings with document id overlaps\n embeddings = np.vstack((old_embeddings, embeddings))\n\n # load IDs\n old_ids = [doc_id[:-1] for doc_id in open_txt(ids_path)]\n logger.debug(f\"New ID Length = {len(ids)}\")\n logger.debug(f\"Old ID Length = {len(old_ids)}\")\n # Remove document ids overlaps\n logger.debug(f\"New ID Length = {len(ids)}\")\n ids = old_ids + ids\n logger.debug(f\"Merged ID Length = {len(ids)}\")\n\n # Append new dataframe\n old_df = pd.read_csv(dataframe_path)\n df = pd.concat([old_df, df])\n\n # Store embeddings and document index\n # for future reference\n np.save(embedding_path, embeddings)\n with open(ids_path, \"w\") as fp:\n fp.writelines([i + \"\\n\" for i in ids])\n\n # Save data csv\n df.to_csv(dataframe_path, index=False)\n\n # Normalize embeddings\n self.embedder.normalize(embeddings)\n\n # Save embeddings metadata\n self.embedder.config[\"ids\"] = ids\n self.embedder.config[\"dimensions\"] = dimensions\n\n # Create embeddings index\n logger.info(f\"Creating embeddings and index\")\n self.embedder.embeddings = ANN.create(self.embedder.config)\n logger.info(f\"Created embeddings\")\n\n # Build the index\n self.embedder.embeddings.index(embeddings)\n logger.info(f\"Built the embeddings index\")" ]
[ "0.55195016", "0.53395104", "0.52961147", "0.5246112", "0.5237849", "0.52238286", "0.5220458", "0.51024866", "0.5090563", "0.508247", "0.5082226", "0.50806385", "0.5064342", "0.4997088", "0.4990647", "0.49789608", "0.49775425", "0.49547988", "0.49478444", "0.49363944", "0.49306542", "0.4917279", "0.49142087", "0.49020556", "0.48931438", "0.48887694", "0.48879308", "0.48667735", "0.48657277", "0.48591724" ]
0.6848295
0
Runs the EnergyPlus software
def run_energyplus(epexe_fp, out_fp, idf_fp, epw_fp, output_prefix='eplus', display=True ): #CREATES THE 'OUT' FOLDER IF IT DOESN'T EXIST if not os.path.isdir(out_fp): os.mkdir(out_fp) #DELETES THE 'eplusout.expidf' FILE IN 'out_fp' IF IT'S PRESENT # this is needed to force the recreation of this file... expidf_fp=os.path.join(out_fp,output_prefix+'out.expidf') if os.path.isfile(expidf_fp): os.remove(expidf_fp) #RUN ENERGYPLUS VIA SUBPROCESS.POPEN l=[epexe_fp, '-x', '-r', '-c', '-d',out_fp, '-p',output_prefix, '-w',epw_fp, idf_fp] st=' '.join(l) os.system(st) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _start_eplus_simulation(self):\n if not self.model:\n self.exit('No model specified.')\n if not self.weather:\n self.exit('No weather specified.')\n model_path = self.model\n if model_path[0] == '~':\n model_path = os.path.expanduser(model_path)\n if model_path[0] != '/':\n model_path = os.path.join(self.cwd, model_path)\n weather_path = self.weather\n if weather_path[0] == '~':\n weather_path = os.path.expanduser(weather_path)\n if weather_path[0] != '/':\n weather_path = os.path.join(self.cwd, weather_path)\n model_dir = os.path.dirname(model_path)\n bcvtb_dir = self.bcvtb_home\n if bcvtb_dir[0] == '~':\n bcvtb_dir = os.path.expanduser(bcvtb_dir)\n if bcvtb_dir[0] != '/':\n bcvtb_dir = os.path.join(self.cwd, bcvtb_dir)\n _log.debug('Working in %r', model_dir)\n\n self._write_port_file(os.path.join(model_dir, 'socket.cfg'))\n self._write_variable_file(os.path.join(model_dir, 'variables.cfg'))\n\n if self.version >= 8.4:\n cmd_str = \"cd %s; export BCVTB_HOME=%s; energyplus -w %s -r %s\" % (\n model_dir, bcvtb_dir, weather_path, model_path)\n else:\n cmd_str = \"export BCVTB_HOME=%s; runenergyplus %s %s\" % (bcvtb_dir, model_path, weather_path)\n _log.debug('Running: %s', cmd_str)\n f = open(model_path, 'r')\n lines = f.readlines()\n f.close()\n endmonth = 0\n if self.currentday + self.length > self.maxday:\n endday = self.currentday + self.length - self.maxday\n endmonth = self.currentmonth + 1\n else:\n endday = self.currentday + self.length\n endmonth = self.currentmonth\n for i in range(len(lines)):\n if lines[i].lower().find('runperiod,') != -1:\n if not self.real_time_flag:\n lines[i + 2] = ' ' + str(self.startmonth) + ', !- Begin Month' + '\\n'\n lines[i + 3] = ' ' + str(self.startday) + ', !- Begin Day of Month' + '\\n'\n lines[i + 4] = ' ' + str(self.endmonth) + ', !- End Month' + '\\n'\n lines[i + 5] = ' ' + str(self.endday) + ', !- End Day of Month' + '\\n'\n else:\n lines[i + 2] = ' ' + str(self.currentmonth) + ', !- Begin Month' + '\\n'\n lines[i + 3] = ' ' + str(\n self.currentday) + ', !- Begin Day of Month' + '\\n'\n lines[i + 4] = ' ' + str(endmonth) + ', !- End Month' + '\\n'\n lines[i + 5] = ' ' + str(endday) + ', !- End Day of Month' + '\\n'\n for i in range(len(lines)):\n if lines[i].lower().find('timestep,') != -1 and lines[i].lower().find('update frequency') == -1:\n if lines[i].lower().find(';') != -1:\n lines[i] = ' Timestep,' + str(self.timestep) + ';' + '\\n'\n else:\n lines[i + 1] = ' ' + str(self.timestep) + ';' + '\\n'\n if self.customizedOutT > 0:\n lines.append('ExternalInterface:Actuator,') + '\\n'\n lines.append(' outT, !- Name') + '\\n'\n lines.append(' Environment, !- Actuated Component Unique Name') + '\\n'\n lines.append(' Weather Data, !- Actuated Component Type') + '\\n'\n lines.append(' Outdoor Dry Bulb; !- Actuated Component Control Type') + '\\n'\n f = open(model_path, 'w')\n\n for i in range(len(lines)):\n f.writelines(lines[i])\n f.close()\n self.simulation = subprocess.Popen(cmd_str, shell=True)", "def run(self):\n self.__power_on()\n\n self.__main()", "def main():\n\n obj = PowerStoreNfsExport()\n obj.perform_module_operation()", "def run_installer():\n global DEBUG_ON\n global NM_AVAILABLE\n username = ''\n password = ''\n pfx_file = ''\n parser = argparse.ArgumentParser(description='eduroam linux installer.')\n parser.add_argument('--debug', '-d', action='store_true', dest='debug',\n default=False, help='set debug flag')\n args = parser.parse_args()\n if args.debug:\n DEBUG_ON = True\n print(\"Runnng debug mode\")\n\n debug(\"Calling 
InstallerData\")\n\n installer_data = InstallerData(username=username, password=password, pfx_file=pfx_file)\n\n # test dbus connection\n if NM_AVAILABLE:\n config_tool = CatNMConfigTool()\n if config_tool.connect_to_nm() is None:\n NM_AVAILABLE = False\n\n installer_data.get_user_cred()\n\n # get user credentials from file\n\n # installer_data.get_user_cred_from_file()\n installer_data.save_ca()\n if NM_AVAILABLE:\n config_tool.add_connections(installer_data)\n else:\n wpa_config = WpaConf()\n wpa_config.create_wpa_conf(Config.ssids, installer_data)\n debug(\"Installation finished.\")", "def main():\n obj = PowerMaxVolume()\n obj.perform_module_operation()", "def main(_):\n description = xm.ExperimentDescription(\n FLAGS.exp_name, tags=[\n FLAGS.env_name,\n ])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)", "def main(_):\n description = xm.ExperimentDescription(\n 'HIS - trial=%d' % FLAGS.trial, tags=['his'])\n experiment = build_experiment()\n xm.launch_experiment(description, experiment)", "def main():\n logging.basicConfig(format='[%(asctime)s] %(levelname)s %(message)s', level=logging.INFO)\n logging.info('** GillenWx Weather Station System Starting **')\n start_time = time.time()\n \n def signal_handler(*args):\n \"\"\" handles shutting down via signals \"\"\"\n if station:\n station.shutdown()\n\n try:\n signal(SIGTERM, signal_handler)\n signal(SIGHUP, signal_handler)\n\n\n station = WeatherStation() \n if not station.check_initialized():\n logging.error('Initialization failed. Exiting...')\n else:\n result = station.test_sensors()\n if result:\n station.run_loop()\n else:\n logging.error('Sensor tests failed. Exiting...')\n\n except KeyboardInterrupt:\n logging.info('Shutdown requested. Cleaning up...')\n station.shutdown()\n\n logging.info(\"Script Finished\")\n logging.info(\"Elapsed Time: %s seconds \", (time.time() - start_time))", "def main():\n set_params()\n\n if check_for_java() == False:\n module.fail_json(\n msg='Failed. No Java was found on the system. 
Please install JAVA to continue.',\n changed=False\n )\n\n if check_for_ofm():\n module.exit_json(\n msg='Oracle WebGate is already installed on the server',\n changed=False\n )\n\n if install_webgate():\n if create_webgate():\n module.exit_json(msg='Succesfully created IHS Oracle Webgate instance',changed=True)\n else:\n module.fail_json(msg='Failed to create Webgate instance',changed=False)\n else:\n module.fail_json(msg='Failed to install Oracle WebGate',changed=False)", "def main():\n\n # Install crypt32 (not required for Proton 3.16-3 and up)\n util.protontricks('crypt32')\n\n # Install directmusic, set overrides\n util.protontricks('directmusic')\n util.winedll_override('dmime', 'n')\n util.winedll_override('dmsynth', 'n')\n util.winedll_override('dmusic', 'n')\n util.winedll_override('dsound', 'n')\n util.winedll_override('dswave ', 'n')\n util.winedll_override('l3codecx', 'n')\n\n # Set sound to alsa\n util.protontricks('sound=alsa')\n\n # Disable launcher\n util.replace_command('patriots.exe', 'riseofnations.exe')", "def run():\n\twrite_fuel_data()", "def start_experiment():\r\n check_parameters()\r\n try:\r\n EXP.start()\r\n except InputError as inst:\r\n tkMessageBox.showinfo(inst.expr, inst.msg)", "def main():\n obj = PowerMaxJob()\n obj.perform_module_operation()", "def main():\n args, extra_args = MyArgumentParser().parse()\n\n # Verify that script is run as root\n if os.getuid():\n sys.stderr.write(\n \"This script needs superuser \" \"permissions to run correctly\\n\"\n )\n sys.exit(1)\n\n # Verify wakeup alarm can be scheduled\n if args.pm_operation != \"reboot\":\n WakeUpAlarm.check()\n\n LoggingConfiguration.set(args.log_level, args.log_filename, args.append)\n logging.debug(\"Arguments: {0!r}\".format(args))\n logging.debug(\"Extra Arguments: {0!r}\".format(extra_args))\n\n # Log deprecation warning\n if args.pm_operation in (\"suspend\", \"hibernate\"):\n logging.warning(\n \"{pm_operation!r} test case will be replaced \"\n \"with a new one based on fwts\".format(pm_operation=args.pm_operation)\n )\n\n try:\n operation = PowerManagementOperation(args, extra_args)\n operation.setup()\n operation.run()\n except (TestCancelled, TestFailed) as exception:\n operation.teardown()\n if isinstance(exception, TestFailed):\n logging.error(exception.args[0])\n message = exception.MESSAGE.format(args.pm_operation.capitalize())\n if args.silent:\n logging.info(message)\n else:\n title = \"{0} test\".format(args.pm_operation.capitalize())\n MessageDialog(title, message, gtk.MESSAGE_ERROR).run()\n\n return exception.RETURN_CODE\n\n return 0", "def main():\n run_program()", "def main():\n driver = Driver()\n driver.start()", "def main():\n Fire(cli)", "def evol_run(self, pkg_setup):\n\n params = specs.ODict()\n params[(\"tune\", \"soma\", \"Ra\")] = [100.0 * 0.5, 100 * 1.5]\n\n amps = [0.0, 0.65] # amplitudes\n times = [100, 200] # start times\n dur = 50 # ms\n targetRates = [0.0, 81.0]\n\n # initial cfg set up\n initCfg = {} # specs.ODict()\n initCfg[\"duration\"] = 200 * len(amps)\n initCfg[(\"hParams\", \"celsius\")] = 37\n\n initCfg[\"savePickle\"] = True\n initCfg[\"saveJson\"] = False\n initCfg[\"saveDataInclude\"] = [\"simConfig\", \"netParams\", \"net\", \"simData\"]\n\n initCfg[(\"IClamp1\", \"pop\")] = \"ITS4\"\n initCfg[(\"IClamp1\", \"amp\")] = amps\n initCfg[(\"IClamp1\", \"start\")] = times\n initCfg[(\"IClamp1\", \"dur\")] = 100\n\n initCfg[(\"analysis\", \"plotfI\", \"amps\")] = amps\n initCfg[(\"analysis\", \"plotfI\", \"times\")] = times\n initCfg[(\"analysis\", 
\"plotfI\", \"dur\")] = dur\n initCfg[(\"analysis\", \"plotfI\", \"targetRates\")] = targetRates\n\n for k, v in params.items():\n initCfg[k] = v[0] # initialize params in cfg so they can be modified\n\n # fitness function\n fitnessFuncArgs = {}\n fitnessFuncArgs[\"targetRates\"] = targetRates\n\n def fitnessFunc(simData, **kwargs):\n targetRates = kwargs[\"targetRates\"]\n\n diffRates = [abs(x - t) for x, t in zip(simData[\"fI\"], targetRates)]\n fitness = np.mean(diffRates)\n\n print(\" Candidate rates: \", simData[\"fI\"])\n print(\" Target rates: \", targetRates)\n print(\" Difference: \", diffRates)\n\n return fitness\n\n # create Batch object with paramaters to modify, and specifying files to use\n b = Batch(cfgFile='src/cfg.py', netParamsFile='src/netParams.py', params=params, initCfg=initCfg)\n\n # Set output folder, grid method (all param combinations), and run configuration\n b.batchLabel = \"ITS4_evol\"\n b.saveFolder = \"/tmp/\" + b.batchLabel\n b.method = \"evol\"\n b.seed = 0\n b.runCfg = {\"type\": \"mpi_bulletin\", \"script\": \"init.py\"}\n b.evolCfg = {\n \"evolAlgorithm\": \"custom\",\n \"fitnessFunc\": fitnessFunc, # fitness expression (should read simData)\n \"fitnessFuncArgs\": fitnessFuncArgs,\n \"pop_size\": 2,\n \"num_elites\": 1, # keep this number of parents for next generation if they are fitter than children\n \"mutation_rate\": 0.4,\n \"crossover\": 0.5,\n \"maximize\": False, # maximize fitness function?\n \"max_generations\": 1,\n \"time_sleep\": 0.25, # wait this time before checking again if sim is completed (for each generation)\n \"maxiter_wait\": 20, # max number of times to check if sim is completed (for each generation)\n \"defaultFitness\": 1000, # set fitness value in case simulation time is over\n }\n # Run batch simulations\n b.run()", "def main(options, args, data_dict, EngineClass=StdEngine) :\n global _data\n _data = data_dict\n # Set the logging facility.\n syslog.openlog(options.log_label, syslog.LOG_PID | syslog.LOG_CONS)\n\n # Set up the signal handlers.\n signal.signal(signal.SIGHUP, sigHUPhandler)\n signal.signal(signal.SIGTERM, sigTERMhandler)\n\n syslog.syslog(syslog.LOG_INFO, \"engine: Initializing weewx version %s\" % weewx.__version__)\n syslog.syslog(syslog.LOG_INFO, \"engine: Using Python %s\" % sys.version)\n syslog.syslog(syslog.LOG_INFO, \"engine: Platform %s\" % platform.platform())\n\n # Save the current working directory. A service might\n # change it. In case of a restart, we need to change it back.\n cwd = os.getcwd()\n\n if options.daemon:\n syslog.syslog(syslog.LOG_INFO, \"engine: pid file is %s\" % options.pidfile)\n daemon.daemonize(pidfile=options.pidfile)\n\n # be sure that the system has a reasonable time (at least 1 jan 2000).\n # log any problems every minute.\n ts = time.time()\n n = 0\n while ts < 946684800:\n if n % 120 == 0:\n syslog.syslog(syslog.LOG_INFO,\n \"engine: waiting for sane time. current time is %s\"\n % weeutil.weeutil.timestamp_to_string(ts))\n n += 1\n time.sleep(0.5)\n ts = time.time()\n\n while True:\n\n os.chdir(cwd)\n\n config_path = os.path.abspath(args[0])\n config_dict = getConfiguration(config_path)\n\n # Look for the debug flag. 
If set, ask for extra logging\n weewx.debug = int(config_dict.get('debug', 0))\n if weewx.debug:\n syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))\n else:\n syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_INFO))\n\n try:\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Initializing engine\")\n\n # Create and initialize the engine\n engine = EngineClass(config_dict)\n \n syslog.syslog(syslog.LOG_INFO, \"engine: Starting up weewx version %s\" % weewx.__version__)\n\n # Start the engine. It should run forever unless an exception occurs. Log it\n # if the function returns.\n engine.run()\n syslog.syslog(syslog.LOG_CRIT, \"engine: Unexpected exit from main loop. Program exiting.\")\n \n # Catch any console initialization error:\n except InitializationError, e:\n # Log it:\n syslog.syslog(syslog.LOG_CRIT, \"engine: Unable to load driver: %s\" % e)\n # See if we should loop, waiting for the console to be ready, or exit:\n if options.loop_on_init:\n syslog.syslog(syslog.LOG_CRIT, \" **** Waiting 60 seconds then retrying...\")\n time.sleep(60)\n syslog.syslog(syslog.LOG_NOTICE, \"engine: retrying...\")\n else:\n syslog.syslog(syslog.LOG_CRIT, \" **** Exiting...\")\n sys.exit(weewx.IO_ERROR)\n\n # Catch any recoverable weewx I/O errors:\n except weewx.WeeWxIOError, e:\n # Caught an I/O error. Log it, wait 60 seconds, then try again\n syslog.syslog(syslog.LOG_CRIT, \"engine: Caught WeeWxIOError: %s\" % e)\n if options.exit:\n syslog.syslog(syslog.LOG_CRIT, \" **** Exiting...\")\n sys.exit(weewx.IO_ERROR)\n syslog.syslog(syslog.LOG_CRIT, \" **** Waiting 60 seconds then retrying...\")\n time.sleep(60)\n syslog.syslog(syslog.LOG_NOTICE, \"engine: retrying...\")\n \n except weedb.OperationalError, e:\n # Caught a database error. Log it, wait 120 seconds, then try again\n syslog.syslog(syslog.LOG_CRIT, \"engine: Caught database OperationalError: %s\" % e)\n if options.exit:\n syslog.syslog(syslog.LOG_CRIT, \" **** Exiting...\")\n sys.exit(weewx.DB_ERROR)\n syslog.syslog(syslog.LOG_CRIT, \" **** Waiting 2 minutes then retrying...\")\n time.sleep(120)\n syslog.syslog(syslog.LOG_NOTICE, \"engine: retrying...\")\n \n except OSError, e:\n # Caught an OS error. Log it, wait 10 seconds, then try again\n syslog.syslog(syslog.LOG_CRIT, \"engine: Caught OSError: %s\" % e)\n weeutil.weeutil.log_traceback(\" **** \", syslog.LOG_DEBUG)\n syslog.syslog(syslog.LOG_CRIT, \" **** Waiting 10 seconds then retrying...\")\n time.sleep(10)\n syslog.syslog(syslog.LOG_NOTICE,\"engine: retrying...\")\n \n except Restart:\n syslog.syslog(syslog.LOG_NOTICE, \"engine: Received signal HUP. Restarting.\")\n\n except Terminate:\n syslog.syslog(syslog.LOG_INFO, \"engine: Terminating weewx version %s\" % weewx.__version__)\n sys.exit()\n\n # Catch any keyboard interrupts and log them\n except KeyboardInterrupt:\n syslog.syslog(syslog.LOG_CRIT,\"engine: Keyboard interrupt.\")\n # Reraise the exception (this should cause the program to exit)\n raise\n \n # Catch any non-recoverable errors. Log them, exit\n except Exception, ex:\n # Caught unrecoverable error. 
Log it, exit\n syslog.syslog(syslog.LOG_CRIT, \"engine: Caught unrecoverable exception in engine:\")\n syslog.syslog(syslog.LOG_CRIT, \" **** %s\" % ex)\n # Include a stack traceback in the log:\n weeutil.weeutil.log_traceback(\" **** \", syslog.LOG_CRIT)\n syslog.syslog(syslog.LOG_CRIT, \" **** Exiting.\")\n # Reraise the exception (this should cause the program to exit)\n raise", "def simulate(self, **kwargs):\n # First, update keys with new values\n for key, value in kwargs.items():\n if f\"_{key}\" in self.__dict__.keys():\n setattr(self, key, value)\n\n if self.as_version != EnergyPlusVersion(self.idd_version):\n raise EnergyPlusVersionError(\n None, self.idfname, EnergyPlusVersion(self.idd_version), self.as_version\n )\n\n start_time = time.time()\n include = self.include\n if isinstance(include, str):\n include = Path().abspath().glob(include)\n elif include is not None:\n include = [Path(file) for file in include]\n\n # check if a weather file is defined\n if not getattr(self, \"epw\", None):\n raise EnergyPlusWeatherError(\n f\"No weather file specified with {self}. Set 'epw' in IDF(\"\n f\"filename, epw='weather.epw').simulate() or in IDF.simulate(\"\n f\"epw='weather.epw')\"\n )\n\n # Todo: Add EpMacro Thread -> if exist in.imf \"%program_path%EPMacro\"\n # Run the expandobjects program if necessary\n with TemporaryDirectory(\n prefix=\"expandobjects_run_\",\n suffix=None,\n dir=self.output_directory,\n ) as tmp:\n # Run the ExpandObjects preprocessor program\n expandobjects_thread = ExpandObjectsThread(self, tmp)\n expandobjects_thread.start()\n expandobjects_thread.join()\n e = expandobjects_thread.exception\n if e is not None:\n raise e\n\n # Run the Basement preprocessor program if necessary\n with TemporaryDirectory(\n prefix=\"runBasement_run_\",\n suffix=None,\n dir=self.output_directory,\n ) as tmp:\n basement_thread = BasementThread(self, tmp)\n basement_thread.start()\n basement_thread.join()\n e = basement_thread.exception\n if e is not None:\n raise e\n\n # Run the Slab preprocessor program if necessary\n with TemporaryDirectory(\n prefix=\"runSlab_run_\",\n suffix=None,\n dir=self.output_directory,\n ) as tmp:\n slab_thread = SlabThread(self, tmp)\n slab_thread.start()\n slab_thread.join()\n e = slab_thread.exception\n if e is not None:\n raise e\n\n # Run the energyplus program\n with TemporaryDirectory(\n prefix=\"eplus_run_\",\n suffix=None,\n dir=self.output_directory,\n ) as tmp:\n running_simulation_thread = EnergyPlusThread(self, tmp)\n running_simulation_thread.start()\n running_simulation_thread.join()\n e = running_simulation_thread.exception\n if e is not None:\n raise e\n return self", "def main():\n\n # Fix crackling audio\n util.set_environment('PULSE_LATENCY_MSEC', '60')\n\n # Replace launcher with game exe in proton arguments\n util.replace_command('FF9_Launcher.exe', 'x64/FF9.exe')", "def main():\n lake_drivers = Dynamic_Lake_Drivers()\n #lake_drivers.prepare_orography_ICE5G_0k_uncorrected()\n #lake_drivers.prepare_orography_ICE5G_0k_corrected()\n #lake_drivers.prepare_orography_ICE6G_21k_corrected()\n #lake_drivers.prepare_river_directions_with_depressions_from_glac1D()\n #lake_drivers.evaluate_glac1D_ts1900_basins()\n #import time\n # start = time.time()\n #lake_drivers.evaluate_ICE6G_lgm_basins()\n # end = time.time()\n # print(end - start)\n #lake_drivers.prepare_basins_from_glac1D()\n #lake_drivers.extract_lake_volumes_from_glac1D_basins()\n #lake_drivers.connect_catchments_for_glac1D()\n lake_drivers.connect_catchments_for_transient_run()\n 
#lake_drivers.extract_volumes_for_transient_run()\n #lake_drivers.add_10min_rmouth_to_transient_data()\n #lake_drivers.expand_transient_data_catchments_to_include_rmouth()\n #lake_drivers.remove_no_data_values_from_upscaled_MERIT_correction_set()\n #lake_drivers.remove_disconnected_points_from_slm()", "def main():\n\n BASIC.run(PROGRAM)", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def set_gsenergy(self, gsenergy=None):\n self.status()\n if not gsenergy:\n if self.__cod == 'vasp': \n #getData = VASP()\n getData = vasp.Energy()\n outfile = 'vasprun.xml'\n elif self.__cod == 'espresso':\n getData = espresso.Energy()\n outfile = 'espresso.out'\n elif self.__cod == 'wien':\n getData = wien.Energy()\n \n elif self.__cod == 'exciting':\n getData = exciting.Energy()\n outfile = 'INFO.OUT'\n elif self.__cod == 'emto':\n getData = emto.Energy(funct=self.__funct)\n outfile = '%s/prn/%s'%(self.__pname,self.__emtoout)\n gsenergy=[] \n for atoms in sorted(self.__structures.items()):\n \n if self.__cod == 'wien': \n outfile = atoms[1].path.split('/')[-1] + '.scf'\n \n if not atoms[1].status:\n #print atoms[1].status\n atoms[1].gsenergy = 0\n continue\n if atoms[1].exclude:\n atoms[1].gsenergy_ignored = getData.get_gsenergy()\n atoms[1].gsenergy = 0\n continue\n if os.path.exists(atoms[1].path+'/exclude'):\n atoms[1].gsenergy_ignored = getData.get_gsenergy()\n atoms[1].gsenergy = 0\n continue\n \n \n #getData.set_outfile('%s/%s/'%atoms[0] + outfile)\n #getData.set_gsEnergy()\n #print atoms[1].path, self.__workdir + '%s/%s'%(atoms[1].path.split('/')[-2],atoms[1].path.split('/')[-1])+'/' + outfile\n #getData.set_fname(self.__workdir + '%s/'%atoms[1].path.lstrip('.') + outfile)\n if 'eta' in atoms[1].path.split('/')[-1] and self.__thermodyn:getData.set_fname(self.__workdir + '%s/%s/%s'%(atoms[1].path.split('/')[-3],atoms[1].path.split('/')[-2],atoms[1].path.split('/')[-1])+'/' + outfile)\n elif 'eta' in atoms[1].path.split('/')[-1] and not self.__thermodyn:getData.set_fname(self.__workdir + '%s/%s'%(atoms[1].path.split('/')[-2],atoms[1].path.split('/')[-1])+'/' + outfile)\n else: getData.set_fname(self.__workdir + '%s'%(atoms[1].path.split('/')[-1])+'/' + outfile)\n print getData.get_fname()\n getData.set_gsenergy()\n if self.__thermodyn and self.__mod!='structures_phonons':\n outfile_ph = 'F_TV'\n #getData.set_fname(self.__workdir + '%s/'%atoms[1].path.lstrip('.') + outfile_ph)\n #getData.T = self.__T\n \n getData.set_phenergy(self.__workdir + '%s/'%atoms[1].path.lstrip('.') + outfile_ph)\n atoms[1].phenergy = getData.get_phenergy()\n atoms[1].T = getData.T\n #atoms[1].gsenergy = getData.get_gsEnergy()\n atoms[1].gsenergy = getData.get_gsenergy()/125.\n else:\n atoms[1].gsenergy = getData.get_gsenergy()\n gsenergy.append(atoms[1].gsenergy)\n \n if self.delPoints:\n for atoms in sorted(self.__structures.items()):\n \n #print [atoms[1].eta for atoms in sorted(self.__structures.items())], gsenergy\n coeff = np.polyfit([atoms[1].eta for atoms in self.__structures.items()], gsenergy, 2)\n p = np.poly1d(coeff)\n k=0\n for (etas,energy) in zip(self.__structures.items(),gsenergy):\n #print (energy-p(etas[1].eta))**2.\n if (energy-p(etas[1].eta))**2. > 0.0004: \n gsenergy[k]=0.\n atoms[1].gsenergy = 0. 
\n k+=1\n self.__gsenergy = gsenergy", "def run():\n \n logger = logging.getLogger(\"galah.sheep.producer\")\n \n\t# Initialize the correct producer based on the selected virtual suite.\n virtual_suite = get_virtual_suite(config[\"VIRTUAL_SUITE\"])\n producer = virtual_suite.Producer(logger)\n\n logger.info(\"Producer is starting\")\n \n # Loop until the program is shutting down\n while not universal.exiting:\n producer.produce_vm()", "def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._estimate_edens_()\n self._compute_()\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n if self.verbose: print(\"\\n Processing Doppler.\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def main():\n bp = Bin_API('COM10')\n print('Testing')\n\n bp.set_hwtrig_term(1)", "def executable():\n\n if len(sys.argv) == 1:\n arguments.get_help()\n sys.exit('\\nGive me something to do and I will do it\\n')\n else:\n # Parse the Arguments that have been provided\n args = arguments.get_args()\n\n # Load The System Logger\n log = logger.load_in(log_level=args.get('log_level', 'info'))\n log.debug('Used Arguments %s', args)\n const(log_method=log)\n\n # Begin Work\n start(set_args=args)" ]
[ "0.6986542", "0.6365376", "0.6232239", "0.622486", "0.6160507", "0.6088555", "0.60638857", "0.6017408", "0.59845275", "0.59615433", "0.59400517", "0.59070885", "0.5902622", "0.5880955", "0.586793", "0.5867335", "0.5855683", "0.58437485", "0.58404374", "0.5810828", "0.57694745", "0.57617325", "0.5747684", "0.5734515", "0.5727247", "0.5711489", "0.5701301", "0.56948286", "0.56796414", "0.56709224" ]
0.7071827
0
Filter generated blocks so that API only sees unique blocks
def unique_blocks_only(gen): seen = set() count = 0 for item in gen: key = tuple([int(x) for x in item]) if key not in seen: seen.add(key) yield item count += 1 # log.info("%s/%s were unique blocks",len(seen),count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FilterBlocks(blocks, filter_func):\n # We retain the 'special' block at the end.\n res = [b for b in blocks[:-1] if filter_func(b)]\n res.append(blocks[-1])\n return res", "def _genBlocksByName(self):\n self.blocksByName = {\n block.getName(): block for block in self.getBlocks(includeAll=True)\n }", "def compute_unique_blocks(self):\n\n unique_blocks = OrderedDict()\n for seqname, rec in self.sequences_with_annotated_blocks().items():\n blocks_locations = (\n [(0, 0)]\n + sorted(\n [\n (f.location.start, f.location.end)\n for f in rec.features\n if f.qualifiers.get(\"is_block\", False)\n ]\n )\n + [(len(rec), len(rec))]\n )\n unique_blocks[seqname] = [\n (end1, start2)\n for (_, end1), (start2, _) in zip(\n blocks_locations, blocks_locations[1:]\n )\n if (start2 - end1) > 1\n ]\n return unique_blocks", "def block_seen(self):\n self.blocklist.update(self.mapping.values())\n self.mapping = dict()", "def filter_blocks(blocks):\n new_blocks = []\n for block in blocks:\n if any(l.startswith('-<rdf:RDF') for l in block):\n continue\n if any(l.startswith('-<math') for l in block):\n continue\n if any(l.startswith('-<sbml') for l in block):\n continue\n if any(l.startswith('-<body') for l in block):\n continue\n if any('&apos;' in l for l in block):\n continue\n new_blocks.append(block)\n return new_blocks", "def test_filter_transaction_block_coin_holder(self):\n self._attempt_list_storage.gateway_transaction_exists.return_value = False\n self._map_storage.coin_address_exists.return_value = True\n transaction = Transaction(tx='723968', receivers=[self._gateway_coin_holder_receiver])\n res = self._coin_transaction_consumer_impl.filter_transaction(transaction)\n self.assertFalse(res)\n self._map_storage.coin_address_exists.assert_called_once_with(self._gateway_coin_holder_receiver.address)\n self._attempt_list_storage.gateway_transaction_exists.assert_called_once_with(transaction.tx)", "def inside_first_filter():\n print(\"inside_first_filter\")\n if len(gCodeBlocks) == 0:\n print(\"no gcode loaded: cannot apply filter\")\n return\n block_to_filter = gCodeBlocks[-1]\n\n g01blocks = block_to_filter.g01blocks\n ng01 = len(g01blocks)\n\n while True:\n swp = False\n for i in range(ng01-1):\n for j in range(i+1, ng01):\n if g01blocks[i].contains(g01blocks[j]):\n g01blocks[i], g01blocks[j] = g01blocks[j], g01blocks[i]\n swp = True\n\n if not swp:\n break\n\n # rearrange original lines\n block_to_filter.lines = []\n for g01block in block_to_filter.g01blocks:\n for line in g01block.lines:\n block_to_filter.lines.append(line)", "def verify_response_block_list(self, response):\n self.assertSetEqual(\n {block['id'] for block in response.data},\n self.non_orphaned_block_usage_keys,\n )", "def _merge_block(internal_transactions, transactions, whitelist):\n transactions_by_id = {\n (transaction[\"hash\"], transaction[\"blockHash\"]): transaction\n for transaction in transactions\n }\n for transaction in internal_transactions:\n hash = transaction[\"transactionHash\"]\n block = transaction[\"blockHash\"]\n if (hash, block) in transactions_by_id:\n whitelisted_fields = {\n key: value\n for key, value in transactions_by_id[(hash, block)].items()\n if key in whitelist\n }\n transaction.update(whitelisted_fields)\n del transactions_by_id[(hash, block)]\n return internal_transactions", "def test_block_bad_batch(self):\n pass", "def test_stochatreat_block_ids(df, block_cols):\n treats = stochatreat(\n data=df,\n block_cols=block_cols,\n treats=2,\n idx_col=\"id\",\n random_state=42,\n )\n\n n_unique_blocks = 
len(df[block_cols].drop_duplicates())\n\n n_unique_block_ids = len(treats[\"block_id\"].drop_duplicates())\n\n np.testing.assert_equal(n_unique_block_ids, n_unique_blocks)", "def mine(self, block):\r\n for n in range(self.maxNonce):\r\n if int(block.generate_hash(), 16) <= self.chain.targetHash:\r\n self.chain.add(block)\r\n break\r\n else:\r\n block.nonce += 1", "def consolidate_empty_blocks(self):\n new_blocks = []\n for block in self.blocks:\n if isinstance(block, BasicBlock) and not block.statements:\n self.remove_block(block)\n else:\n new_blocks.append(block)\n self.blocks = new_blocks", "def reconsiderblock(self, block_hash: str) -> None:\n return self.rpc_call(\"reconsiderblock\", block_hash)", "def test_block_extra_batch(self):\n pass", "def prune_redundant_blocks(hpo_data):\n\n for hpo, hdat in hpo_data.items():\n if len(hdat['blocks']) == 0:\n continue\n\n # First step: make a graph of all blocks where edges indicate overlapping credible intervals\n G = nx.Graph()\n G.add_nodes_from(hdat['blocks'].keys())\n cs_bt_strs = []\n for bid, bdat in hdat['blocks'].items():\n cs_bt_strs += ['{}\\t{}\\t{}\\t{}\\n'.format(*x, bid) for x in bdat['credset_coords']]\n cs_bt = pbt.BedTool(''.join(cs_bt_strs), from_string=True)\n for hit in cs_bt.sort().merge(c=4, o='distinct'):\n bids = hit[3].split(',')\n if len(bids) > 1:\n for bid_a in bids:\n for bid_b in bids:\n if bid_a != bid_b:\n G.add_edge(bid_a, bid_b)\n\n # Second step: resolve subgraphs with multiple nodes\n for g in nx.connected_components(G):\n if len(g) > 1:\n # Gather evidence for each block (significance level and size)\n criteria = {bid : (hdat['blocks'][bid]['credset_max_sig'], \n np.sum([x.length for x in hdat['blocks'][bid]['credset_bt']])) \\\n for bid in g}\n # Keep blocks with higher significance level (GW over FDR)\n # Break ties by taking larger block\n criteria = {k : v for k, v in sorted(criteria.items(), \n key=lambda x: x[1][1], \n reverse=True)}\n criteria = {k : v for k, v in sorted(criteria.items(), \n key=lambda x: x[1][0].lower(), \n reverse=True)}\n for i, bid in enumerate(criteria.keys()):\n if i > 0:\n hpo_data[hpo]['blocks'].pop(bid)\n\n return hpo_data", "def blocks(self): # -> BlockView:\n ...", "def test_block_missing_batch(self):\n pass", "def _unique_beams(self):\n bmap, mask = self.single_pointing_telescope._unique_beams()\n block_bmap = linalg.block_diag(*[bmap+i*self.single_pointing_telescope.nfeed for i, _ in enumerate(self.pointings)])\n block_mask = linalg.block_diag(*[mask for _ in self.pointings])\n\n return block_bmap, block_mask", "def genBlocksByLocName(self):\n self.blocksByLocName = {\n block.getLocation(): block for block in self.getBlocks(includeAll=True)\n }", "def verify_response_block_dict(self, response):\n self.assertSetEqual(\n set(response.data['blocks'].keys()),\n self.non_orphaned_block_usage_keys,\n )", "def get_specific_blocks(blocks, bl_type):\n specific_blocks = []\n\n for b in blocks:\n if (b.get(bl_type, None)):\n specific_blocks.append(b)\n\n return specific_blocks", "def trimDups( options, data ):\n for c in data.chrNames:\n prevBlock = MafBlock()\n replacement = []\n if c not in data.mafBlocksByChrom:\n data.mafBlocksByChrom[ c ] = replacement\n continue\n for m in data.mafBlocksByChrom[ c ]:\n if m.refStart <= prevBlock.refEnd:\n if m.refEnd > prevBlock.refEnd:\n # only add in the new, distinct, bases\n m.refStart = prevBlock.refEnd + 1\n else:\n # this block is totally covered by the previous block\n continue\n replacement.append( m )\n prevBlock = m\n 
data.mafBlocksByChrom[ c ] = replacement", "def filter(self, filters):", "async def _new_blocks(self) -> AsyncGenerator[Eth1Block, None]:\n while True:\n try:\n block = self._eth1_data_provider.get_block(\"latest\")\n except BlockNotFound:\n raise Eth1MonitorValidationError(\"Fail to get latest block\")\n target_block_number = BlockNumber(block.number - self._num_blocks_confirmed)\n from_block_number = self.highest_processed_block_number\n if target_block_number > from_block_number:\n # From `highest_processed_block_number` to `target_block_number`\n for block_number in range(\n from_block_number + 1, target_block_number + 1\n ):\n try:\n block = self._eth1_data_provider.get_block(\n BlockNumber(block_number)\n )\n except BlockNotFound:\n raise Eth1MonitorValidationError(\n f\"Block does not exist for block number={block_number}\"\n )\n yield block\n await trio.sleep(self._polling_period)", "def _iterate_blocks(self):\n ranges = [host_tuple[0:2] for host_tuple in self.parity_hosts]\n flags_sql = \"SELECT id, value FROM {} FINAL WHERE name = 'traces_extracted'\".format(self.indices[\"block_flag\"])\n return self.client.iterate(\n index=self.indices[\"block\"],\n fields=[\"number\"],\n query=\"ANY LEFT JOIN ({}) USING id WHERE value IS NULL AND {}\".format(\n flags_sql,\n utils.make_range_query('number', *ranges)\n ),\n )", "def filter_completed_blocks(required_blocks, completed_activities):\n required_block_filter_list = []\n\n for activity_block in required_blocks:\n for block_completed in completed_activities:\n if block_completed.block_id == activity_block.block_id:\n required_block_filter_list.append(block_completed)\n\n return required_block_filter_list", "def unique_filter(rows):\n old_row = {}\n row = None\n for row in rows:\n row_data = dict(row)\n try:\n del row_data['_id']\n del row_data['das']\n del row_data['das_id']\n del row_data['cache_id']\n except:\n pass\n old_data = dict(old_row)\n try:\n del old_data['_id']\n del old_data['das']\n del old_data['das_id']\n del old_data['cache_id']\n except:\n pass\n if row_data == old_data:\n continue\n if old_row:\n yield old_row\n old_row = row\n yield row", "def remove_blocks(self, block_ids):\n self.smd3.remove_blocks(block_ids)\n self.logic.update(self.smd3)\n self.header.update(self.smd3)", "def remove_blocks(draft):\n for symbol in draft.Blocks:\n if symbol.Name in blocks_to_delete:\n print(\"[-] %s, \\tdeleted\" % symbol.Name)\n symbol.delete()\n\n # for ball in draft.ActiveSheet.Balloons:\n if draft.Balloons:\n for ball in draft.Balloons:\n if ball.BalloonType == 7: # type 7 filter the triangle balloons.\n print(\"[-] %s, \\tdeleted\" % ball.Name)\n ball.Delete()\n else:\n pass" ]
[ "0.6391811", "0.596172", "0.5878976", "0.586717", "0.58146787", "0.5781387", "0.57403624", "0.56411433", "0.5627675", "0.5621748", "0.5612627", "0.560742", "0.557485", "0.5542953", "0.5502141", "0.55009717", "0.54874545", "0.542891", "0.5417295", "0.5398559", "0.5390584", "0.5368763", "0.5360376", "0.5341695", "0.5331631", "0.53315234", "0.5327669", "0.5322296", "0.53066516", "0.5298007" ]
0.63568914
1
change attributes for all tracks
def changeattributes(self, *_, inplace = True, **kwa) -> 'TracksDict': assert len(_) == 0 this = self if inplace else self.clone() for track in this.values(): for i, j in kwa.items(): setattr(track, i, j) return this
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_attributes(self):", "def update(self):\n \n for track in self.tracks:\n track.update()", "def _save(self):\n for attrib in self.attribs:\n setattr(self, attrib, getattr(self.obj, attrib))", "def set_attributes(self, attributes):\n self.attributes = attributes", "def update(self, attributes):\n for key in attributes:\n k = key.lower()\n if not isinstance(attributes[key], str) or attributes[key] != '':\n k_ = k.strip(' =:\\t\\n').replace('', '')\n self.attributes.update({k_: attributes[key]})\n elif k in self.attributes:\n del self.attributes[k]", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def change_metadata(self, **kwargs):\n metadata = self.state.get_player_state(PLAYER_IDENTIFIER)\n\n # Update saved metadata\n for key, value in kwargs.items():\n setattr(metadata, key, value)\n\n # Create a temporary metadata instance with requested parameters\n change = PlayingState(**kwargs)\n self.state.item_update(change, PLAYER_IDENTIFIER)", "def set_attr_values(self):\n ats = self.attributes # convenient short name\n for aid in ats:\n value = ats[aid]['nv'] if 'nv' in ats[aid] else (\n ats[aid]['value'] if 'value' in ats[aid] else None)\n if value is not None:\n# self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)\n #- self.file.h5save_attribute(self.full_path, aid, value)\n #- self.file.h5commands.append(\"set attribute(%s:%s)-%s\" % (self.full_path,\n #- aid, value))", "def set_attributes(self, new_attributes=None):\n self.attributes = new_attributes", "def set_nowplaying_metadata(self, track, album, artist):\n\n\t\tparts = [artist[:30], album[:30], track[:30]]\n\t\tself._send_message(\"MUSIC_CONTROL\", self._pack_message_data(16, parts))", "def set_nowplaying_metadata(self, track, album, artist):\n\n\t\tparts = [artist[:30], album[:30], track[:30]]\n\t\tself._send_message(\"MUSIC_CONTROL\", self._pack_message_data(16, parts))", "def _track_attr(self):\n self._track_item['joint_pos'].append(self.joint_pos.copy())\n self._track_item['action'].append(self.action.copy())\n self._track_item['velocity'].append(self.sim.data.qvel[:6].copy())\n self._track_item['position'].append(self.sim.data.qpos[:3].copy())\n self._track_item['true_joint_pos'].append(self.sim.data.qpos[-self._num_joints:].copy())\n self._track_item['sensordata'].append(self.sim.data.sensordata.copy())\n self._track_item['qpos'].append(self.sim.data.qpos.copy())\n self._track_item['qvel'].append(self.sim.data.qvel.copy())\n ob = self._get_obs()\n self._track_item['achieved_goal'].append(ob['achieved_goal'].copy())\n self._track_item['observation'].append(ob['observation'].copy())\n self._track_item['desired_goal'].append(ob['desired_goal'].copy())\n self._track_item['omega_o'].append(self.omega.copy())\n self._track_item['omega'].append(self.w.copy())\n self._track_item['z'].append(self.z.copy())\n self._track_item['mu'].append(self.mu.copy())\n self._track_item['d1'].append(np.array([self.d1], dtype = np.float32))\n self._track_item['d2'].append(np.array([self.d2], dtype = np.float32))\n 
self._track_item['d3'].append(np.array([self.d3], dtype = np.float32))\n self._track_item['stability'].append(np.array([self.stability], dtype = np.float32))", "def update(self, attributes, ifAll=False, forceGraphInfo=False):\n for key in attributes:\n k = key.lower().replace('', '')\n try:\n if k in self.headersKeys:\n if attributes[key] != '':\n self.headers.update({k: attributes[key]})\n elif k in self.headers:\n del self.headers[k]\n elif (k in self.graphInfoKeys or forceGraphInfo\n or k.startswith('subplots')):\n if attributes[key] != '':\n self.graphInfo.update({k: attributes[key]})\n elif k in self.graphInfo:\n del self.graphInfo[k]\n # by default nothing in sampleInfo, everything in curves\n else:\n if ifAll:\n for i in range(self.length()):\n self.curve(i).update({k: attributes[key]})\n else:\n self.curve(-1).update({k: attributes[key]})\n except Exception as e:\n print('Error Graph.update: key', key, ' attributes',\n attributes, 'exception', e)", "def set_artists(audio: EasyID3, artists):\r\n audio['artist'] = artists\r\n audio.save()", "def attributes(self, attributes):\n\n self._attributes = attributes", "def attributes(self, attributes):\n\n self._attributes = attributes", "def update(self, **kwargs):\n for key, value in kwargs.items():\n key = key.upper()\n if not hasattr(self, key):\n self.logger.info(f'[✗] Ignore unknown attribute \"{key}\"')\n else:\n setattr(self, key, value)\n self.logger.info(f'[✓] Attribute \"{key}\" has been updated to \"{value}\"')\n\n assert self.UI in self._SUPPORT_UI, 'unsupported UI'\n assert self.MODE in self._SUPPORT_MODE, 'unsupported MODE'", "def set_attributes(self, attributes: typing.Dict[str, types.AttributeValue]) -> None:\n if not attributes:\n return\n for key, value in attributes.items():\n self.set_attribute(key, value)", "def set_track_info(self, payload):\n self.raw_trackname = payload['currentTrack'].get('title', \"\")\n self.artist = payload['currentTrack'].get('artist', \"\")\n self.album = payload['currentTrack'].get('album', \"\")\n self.station = payload['currentTrack'].get('stationName', \"\")\n\n if sonos_settings.artist_and_album_newlook :\n if self.raw_trackname.startswith(\"x-sonosapi-\") :\n self.raw_trackname = self.station\n\n if self.artist == self.station and self.type == \"radio\" :\n if self.raw_trackname.count(\"~\") : c = \"~\"\n elif self.raw_trackname.count(\"˗\") : c = \"˗\"\n elif self.raw_trackname.count(\"*\") : c = \"*\"\n elif self.raw_trackname.count(\"|\") : c = \"|\"\n elif self.raw_trackname.count(\" - \") : c = \" - \"\n elif self.raw_trackname.count(\" / \") : c = \" / \"\n else : c = \"\"\n\n if c :\n oldstr=self.raw_trackname.casefold()\n splitstr = oldstr.split(c)\n self.artist = ' '.join(word[0].upper() + word[1:] for word in splitstr[0].split())\n self.raw_trackname = ' '.join(word[0].upper() + word[1:] for word in splitstr[1].split())\n if c == \"~\" :\n self.album = ' '.join(word[0].upper() + word[1:] for word in splitstr[2].split())\n else :\n self.album = \"\"\n# self.album = self.station\n\n # Abort update if all data is empty\n if not any([self.album, self.artist, self.duration, self.station, self.raw_trackname]):\n _LOGGER.debug(\"No data returned by the API, skipping update\")\n return None\n\n if self.type == \"radio\" and not self.station:\n # if not then try to look it up (usually because its played from Alexa)\n self.station = find_unknown_radio_station_name(self.raw_trackname)\n\n # Clear uninteresting tracknames\n if self.raw_trackname.startswith(\"x-sonosapi-\") or 
self.raw_trackname.endswith(\".m3u8\"):\n self.trackname = \"\"\n else:\n self.trackname = self.raw_trackname\n\n\n track_id = self.artist\n if self.trackname:\n track_id += f\" - {self.trackname}\"\n if self.album:\n track_id += f\" ({self.album})\"\n if self.duration:\n track_id += f\" - {timedelta(seconds=self.duration)}\"\n if self.station:\n track_id += f\" [{self.station}]\"\n\n return track_id", "def update(self, new_attrs):\n self.last_update = round(time())\n self.attrs.update(new_attrs)", "def updateAttrs(self, kwargs):\n for k, v in kwargs.iteritems():\n setattr(self, k, v)", "def set_attributes(object, attributes):\n for name, attribute in attributes.items():\n setattr(object, name, attribute)", "def store_attrs(self, attrs):\n self.get_attr().SetObject(dumps(attrs), False)", "def setItunesAttribute(self,key,value):\n self.itunesAttributes[key] = value", "def test_update_metadata_by_attribute(self):\n pass", "def OnAttributesUpdated():\n pass", "def update(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "def change_track(name, track_name):\n tree = ET.parse(open(name))\n config = tree.getroot()\n tracks = config.find(\".//section[@name='Tracks']\")\n track = tracks.find(\"./section[@name='1']\")\n assert track is not None\n\n change_track_name(track, track_name)\n tree.write(open(name, 'wb'))" ]
[ "0.64604264", "0.61889005", "0.60451776", "0.5901282", "0.5860429", "0.5837633", "0.5837633", "0.5837633", "0.58174413", "0.5739532", "0.5694126", "0.5686006", "0.5686006", "0.5678369", "0.567754", "0.5617913", "0.5544684", "0.5544684", "0.5522162", "0.552065", "0.55064726", "0.5499998", "0.54807866", "0.5451549", "0.5446411", "0.54403347", "0.5438436", "0.54260427", "0.5424316", "0.5403509" ]
0.73280555
0
Return the cloned TracksDict corresponding to the current selected items
def _dictview(self) -> TracksDict: return self._items[self._keys, self._beads] # type: ignore
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reset_track_lst(self):\n del self._track_item\n self._track_item = {key : [] for key in self._track_lst}\n return self._track_item", "def checkpoint_items(self):\n items = dict(backbone=self.backbone, head=self.head)\n if self.decoder is not None:\n items.update(decoder=self.decoder)\n return items", "def get_selection_dct(self) -> typing.Optional[dict]:\n if self.scanlist is None:\n return None\n add_rfid_lst = self.scanlist.get_active_tags()\n print(\"newstock {}\".format(add_rfid_lst))\n if len(add_rfid_lst) == 0:\n return None\n # find the location selected...\n # NOTE: no longer user-defined location; instead must be configured (serverconfig)\n # just set this to None for now\n locid = None\n return {'rfids': add_rfid_lst, 'location': locid}", "def copy(self):\n return self.__class__(self.items, self.is_cloud)", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def __copy__(self):\n d = dict()\n d.update(self.items())\n return d", "def getselected_nodes(self):\n self.selected_nodes = {}\n for path in self.options.selected_nodes:\n sel_data = path.rsplit(':', 2)\n path_id = sel_data[0]\n sub_path = int(sel_data[1])\n sel_node = int(sel_data[2])\n if path_id not in self.selected_nodes:\n self.selected_nodes[path_id] = {sub_path: [sel_node]}\n else:\n if sub_path not in self.selected_nodes[path_id]:\n self.selected_nodes[path_id][sub_path] = [sel_node]\n else:\n self.selected_nodes[path_id][sub_path].extend([sel_node])", "def copyItem(self):\n # extract all selected item\n itms = []\n for item in self.scene.selectedItems():\n if isinstance(item, DiagramItem):\n itms.append(item.data)\n\n # pickle data\n mime = QMimeData()\n mime.setData( self.__mime__ , QByteArray(pickle.dumps(itms)) )\n\n # copy to clipboard\n QApplication.clipboard().setMimeData(mime,QClipboard.Clipboard)\n self.pasteAction.setEnabled(True)", "def copy(self):\n import copy\n MultiDict.__setitem__ = dict.__setitem__\n cp = copy.deepcopy(self)\n MultiDict.__setitem__ = MultiDict._setitem_list\n return cp", "def copy(self):\n return pdict(dict.copy(self))", "def copy(self):\n return self.update({})", "def copy_to_dict(\n self,\n context: Context,\n ) -> dict:\n return {\n 'action_id': self.action_id,\n 'hidden': not self.get_visible(context=context),\n 'label': self.get_label(context=context),\n 'url': self.get_url(context=context),\n }", "def __copy__(self):\n return Bag(self.items)", "def copy_to_dict(\n self,\n context: Context,\n ) -> dict:\n return {\n 'action_id': self.action_id,\n 'child_actions': self.child_actions,\n 'hidden': not self.get_visible(context=context),\n 'label': self.get_label(context=context),\n 'url': self.get_url(context=context),\n }", "def CopyToDict(self):\n return {'labels': self.labels}", "def preset_items(self):\r\n\r\n raise NotImplementedError", "def copy(self):\n return OrderedDict(self)", "def process(self, current_items, saved_items):\n result = {}\n for key in set(current_items.keys() + saved_items.keys()):\n old_item = saved_items.get(key, None)\n new_item = current_items.get(key, None)\n result[key] = self._process_item(old_item, new_item)\n\n return result", "def clone(self):\n return ga_list.ga_list.data_clone(self)", "def copy_tracks(self):\n tracks = []\n for ix in range(self.clementine_tracks.topLevelItemCount()):\n item = self.clementine_tracks.topLevelItem(ix)\n tracks.append((ix + 1, item.text(0)))\n with wait_cursor(self._parent):\n dmla.update_album_tracknames(self.a_album, tracks)\n self.refresh_screen(self.artists_list.currentIndex(),\n 
self.albums_list.currentIndex())", "def copy(self):\n return AttrDict(dict(self).copy())", "def get_selected_items_for_plot(self):\n # We want the dictionary to be sorted the same way as 'map_keys'\n sdict = self.stat_dict\n selected_keys = [_ for _ in self.map_keys if (_ in sdict) and (sdict[_] is True)]\n return selected_keys", "def copy(self):\n # dict.copy() works if all values are immutable, deepcopy(dict) otherwise.\n vf = VariantFinder(tree_input='', verbose=self.verbose, _blank_init=True)\n vf.tree = self.tree.copy()\n vf.leaves = self.leaves[::]\n vf.index = self.index.copy()\n vf.orig_dists = self.orig_dists.copy()\n vf.newick_tree_data = self.newick_tree_data\n vf.phyloxml_tree_data = self.phyloxml_tree_data\n vf.cache = deepcopy(self.cache)\n vf.normalize = deepcopy(self.normalize)\n vf.display_options = deepcopy(self.display_options)\n vf.selection_groups_order = self.selection_groups_order[::]\n vf.selection_groups_data = deepcopy(self.selection_groups_data)\n vf.file_name = self.file_name\n vf.max_root_distance = self.max_root_distance\n vf._not_ignored_inds = self._not_ignored_inds.copy()\n vf._ignored = self.ignored.copy()\n vf._chosen = self.chosen.copy()\n vf._available = self.available.copy()\n return vf", "def copy(self):\n return self.__class__(dict(self))", "def _selected_data(self):\n for items in self.ui.data_list.selectedItems():\n yield self._data[str(items.text())]", "def clone(self):\n return _libsbml.ListOfSubmodels_clone(self)", "def get_non_selected(self):\n\n obj_list = self.get_list()\n\n for sel in self.get_selected():\n obj_list.remove(sel)\n\n return obj_list", "def sidebar_menu_subitems(item, selected):\n\n return {\n 'item': item,\n 'selected': selected,\n }", "def on_selected_new_item(self, item):\n pass", "def copy(self):\n return defaultdict.copy(self)" ]
[ "0.5568035", "0.54771763", "0.5457668", "0.545101", "0.53839874", "0.53815985", "0.5333072", "0.52926517", "0.5273943", "0.52510434", "0.52496594", "0.524671", "0.5231919", "0.5220479", "0.5164312", "0.51326734", "0.5117656", "0.5115659", "0.5113828", "0.510586", "0.5099929", "0.50599605", "0.5042009", "0.50369525", "0.50299495", "0.5004981", "0.49967468", "0.49864575", "0.49761233", "0.49677098" ]
0.5978185
0
Parse the dwh.cfg configuration file
def config_parse_file():
    global ANGELCO_EMAIL, ANGELCO_PASSWORD
    print("Parsing the config file...")
    config = configparser.ConfigParser()
    with open('dwh.cfg') as configfile:
        config.read_file(configfile)
    ANGELCO_EMAIL = config.get('ANGELCO', 'EMAIL')
    ANGELCO_PASSWORD = config.get('ANGELCO', 'PASSWORD')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_config(self):\n # TODO: parse config file\n pass", "def parse_conf(self):\n\n parser = configparser.RawConfigParser()\n parser.read(self.filename)\n\n try:\n self.id_node = parser['CONF_MACHINE']['ID_NODE']\n\n # eliminate possible white spaces between metrics\n temp = parser['CONF_MACHINE']['METRICS'].split(',')\n for itr in temp:\n self.metrics.append(itr.strip())\n\n except Exception:\n raise Exception(\"missing id or metrics\")\n\n try:\n self.interval = parser['CONF_MAHCINE']['INTERVAL']\n except Exception:\n self.interval = 1\n\n try:\n self.ampq_url = parser['ampq']['url']\n self.ampq_port = parser['ampq']['port']\n self.ampq_vhost = parser['ampq']['vhost']\n self.ampq_user = parser['ampq']['user']\n self.ampq_password = parser['ampq']['password']\n except Exception:\n raise Exception(\"missing ampq configs\")", "def _parse_conf(args):\n conf = args.config_dir\n assert os.path.isdir(conf), \\\n \"configuration directory {0} does not exist, run init\".format(conf)\n conf_file = os.path.join(conf, 'conf_{0}.json'.format(APP_NAME))\n assert os.path.isfile(conf_file), \\\n \"configuration file does not exist {0}, \\\n not properly initialized\".format(conf_file)\n with open(conf_file) as f:\n data = json.load(f)\n # validate data\n assert 'data_url' in data, \\\n \"data_url': '' not found in {0}\".format(conf_file)\n assert 'archive_paths' in data, \\\n \"'archive_paths': [] not found in {0}\".format(conf_file)\n assert 'min_loop' in data, \\\n \"'min_loop': [] not found in {0}\".format(conf_file)\n\n # build up nested named tuple to hold parsed config\n app_config = namedtuple(\n 'fixity',\n 'json_dir, conf_file, errors, ignore_re',\n )\n daemon_config = namedtuple('FixityDaemon', 'pid, log', )\n daemon_config.pid = os.path.abspath(\n os.path.join(conf, 'logs', '{0}.pid'.format(APP_NAME)))\n daemon_config.log = os.path.abspath(\n os.path.join(conf, 'logs', '{0}.log'.format(APP_NAME)))\n app_config.json_dir = os.path.abspath(os.path.join(conf, 'json_dir'))\n app_config.errors = os.path.abspath(os.path.join(conf, 'errors'))\n if 'ignore_paths' in data and data['ignore_paths'] != []:\n # http://stackoverflow.com/a/5141829/1763984\n app_config.ignore_re = r'|'.join(\n [fnmatch.translate(x) for x in data['ignore_paths']]\n ) or r'$.'\n else:\n app_config.ignore_re = False\n c = namedtuple('FixityConfig', 'app, daemon, args, data, conf_file')\n c.app = app_config\n c.daemon = daemon_config\n c.args = args\n c.data = data\n c.conf_file = os.path.abspath(conf_file)\n return c", "def loadConfig():\n lines = []\n config = {}\n here = path.dirname(__file__)\n fn = path.join(here,'manatee.conf')\n try:\n with codecs.open(fn,'rU','utf-8') as conf:\n lines = conf.readlines()\n conf.close()\n except IOError as e:\n print \" Could not open configuration file: %s\" % e\n\n for line in lines:\n try:\n line = line.strip()\n if line:\n values = [x.strip() for x in line.split('=')]\n config[values[0]] = values[1]\n except Exception as e:\n print \"There was an error in the configuration file: %s\" % e\n # TODO: Any strings from the config file that might be displayed or passed into the SQL server need to be validated here.\n# config = validateConfig(config)\n return config", "def parse_config(config_file):\n\n conf = {}\n config = configparser.ConfigParser()\n valid_schedule = r'\\d{1,2}:\\d{2}(:\\d{2})*\\s+[AM|PM]'\n \n #configparser does not throw exception (empty dataset if files are not found)\n if(len(config.read(config_file)) == 0):\n raise FileNotFoundError(\"Failed to find config 
file\")\n\n\n conf['credentials'] = {\"username\": config['credentials']['username'], \"password\": config['credentials']['password']}\n conf['hashtags'] = [hashtag for hashtag in config['hashtags'].values()]\n conf['schedule'] = [time.upper() for time in config['schedule'].values() if re.search(valid_schedule,time, re.IGNORECASE)]\n conf['driverpath'] = config['driver']['path']\n\n return conf", "def parse_data_config(path):\n cfg = dict()\n cfg['gpus'] = '0,1,2,3'\n cfg['num_workers'] = '10'\n \n with open(path, 'r') as fp:\n lines = fp.readlines()\n for line in lines:\n line = line.strip()\n if line == '' or line.startswith('#'):\n continue\n key, value = line.split('=')\n cfg[key.strip()] = value.strip()\n \n return cfg", "def _parse(self, content):\n os.environ['ASTER_VERSION_DIR'] = self.dirn\n cfg = {}\n self._content = content\n for l in split_endlines(self._content):\n if not re.search('^[ ]*#', l):\n try:\n typ, nam, ver, val = l.split('|')\n #print '========>', typ, '//', nam, '//', ver, '//', val\n typ = re.sub('^[ ]*', '', re.sub('[ ]*$', '', typ)).strip()\n val = re.sub('^[ ]*', '', re.sub('[ ]*$', '', val)).strip()\n if val != '':\n val = osp.expandvars(val)\n if cfg.has_key(typ):\n cfg[typ].append(val)\n else:\n cfg[typ] = [val]\n except ValueError:\n pass\n return cfg", "def read_config(self, config_filename):", "def Parse(self, path):\n try:\n self.cp = ConfigParser.ConfigParser()\n self.cp.read('%s/dmerce.cfg' % path)\n except:\n self.__log.Write(submodule = 'Parse', msgType = 'ERROR',\n msg = '%s %s' % (sys.exc_info()[0], sys.exc_info()[1]))\n return None", "def readConfig(quickLogger,basename):\n\n commonDictionary = {}\n analysisDictionary = {}\n likelihoodDictionary = {}\n plotDictionary = {}\n curveDictionary = {}\n\n try:\n checkForFiles(quickLogger,[basename+\".cfg\"])\n quickLogger.info('Reading from config file ('+basename+'.cfg)') \n config = ConfigParser.RawConfigParser()\n config.read(basename+'.cfg')\n \n if(config.has_section('common')):\n quickLogger.info('Reading common variables...')\n commonDictionary = dict(config.items('common'))\n if( commonDictionary['binned'] in ['True', 'true', '1', 'yes']):\n commonDictionary['binned'] = True\n else:\n commonDictionary['binned'] = False\n \n if(config.has_section('quickAnalysis')):\n quickLogger.info('Reading quickAnalysis variables...')\n analysisDictionary = dict(config.items('quickAnalysis'))\n\n if(config.has_section('quickLike')):\n quickLogger.info('Reading quickLike variables...')\n likelihoodDictionary = dict(config.items('quickLike'))\n\n if(config.has_section('quickPlot')):\n quickLogger.info('Reading quickPlot variables...')\n plotDictionary = dict(config.items('quickPlot'))\n\n if(config.has_section('quickCurve')):\n quickLogger.info('Reading quickCurve variables...')\n curveDictionary = dict(config.items('quickCurve'))\n\n return commonDictionary,analysisDictionary,likelihoodDictionary,plotDictionary,curveDictionary\n\n except(FileNotFound):\n raise FileNotFound\n return", "def load_config(f):\n config = ConfigParser.RawConfigParser()\n config.readfp(f)\n # Mininum required data. 
Raises exception if non-existent.\n config.get('memrise', 'username')\n config.get('beeminder', 'username')\n config.get('beeminder', 'auth_token')\n config.get('beeminder', 'goal_slug')\n return config", "def readConfig(self, cfg='hamsterPrinter.cfg'):\n from configparser import ConfigParser\n parser = ConfigParser()\n parser.read(cfg)\n return parser", "def config_parsing(configfile):\n config = ConfigParser.ConfigParser()\n config.read(configfile)\n db_connection = config.get('app:main', 'database_connection')\n db, eng = map_database(db_connection)\n return db, eng", "def parse_cfg(cfgfile):\n file = open(cfgfile, \"r\")\n lines = file.read().split(\"\\n\") # store the lines in a list\n lines = [x for x in lines if len(x) > 0] # get read of the empty lines \n lines = [x for x in lines if x[0] != \"#\"] # get rid of comments\n lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces\n\n block = {}\n blocks = []\n\n for line in lines:\n if line[0] == \"[\":\n if len(block) != 0:\n blocks.append(block)\n block = {}\n block[\"type\"] = line[1:-1].rstrip()\n else:\n key, value = line.split(\"=\")\n block[key.rstrip()] = value.lstrip()\n blocks.append(block)\n return blocks", "def parse_config():\n config_path = Path(\"config.ini\")\n if config_path.exists():\n config.read(config_path)\n else:\n config[\"database\"] = {\"location\": \"image-database.db\"}\n config[\"images\"] = {\"extensions\": \".jpeg,.jpg,.png,.gif,.tiff\"}\n with open(config_path, \"w\") as configfile:\n config.write(configfile)\n config.read(config_path)", "def initConf(confile = \"config.xml\"):\n if confile == \"\" or confile == None:\n confile = \"config.xml\"\n try:\n domTree = dom.parse(confile)\n root = domTree.documentElement\n rootName = root.nodeName\n if(rootName != \"py4d\"):\n print(\"Configuration file is broken:root element must be '<py4d>'!\")\n __parsePropertyElement(root.getElementsByTagName(\"property\"))\n headersTags = root.getElementsByTagName(\"headers\")\n if len(headersTags) > 0:\n for headerTag in headersTags:\n headers = headerTag.getElementsByTagName(\"header\")\n if len(headers) > 0:\n __parseHeaders(headers)\n except Exception as ex:\n print(\"Can not parse config file '\" + confile + \"' due to:\" + str(ex))\n sys.exit(1)\n finally:\n __searchTxtFileAndCreateFolder()", "def cfg_lod2(cfg_lod2_path):\n with open(cfg_lod2_path, 'r') as fo:\n return configure.parse_configuration(fo)", "def parse_conf(conf):\n global Message, Command\n Message = conf['message']\n Command = conf['command']\n write_streak()\n Parent.Log(ScriptName, 'Load conf: {}'.format((Message, Command)))", "def __check_configuration__(self, parser):\n if not parser.has_section('core'):\n self.logger.error('The config file should contain a core section with at least the module_path specified')\n sys.exit(1)\n\n else:\n if parser.get('core', 'modules_path', fallback=None) is None:\n self.logger.error('The configuration file should contain at least the modules_path value in core section.')\n sys.exit(1)\n\n if not parser.has_section('mysql'):\n self.logger.error('The config file should contain a mysql section.')\n sys.exit(1)\n\n else:\n if parser.get('mysql', 'host', fallback=None) is None:\n self.logger.error('The config file should contain the host value in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'port', fallback=None) is None:\n self.logger.error('The config file should contain the port value in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'user', fallback=None) is None:\n 
self.logger.error('The config file should contain the user in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'password', fallback=None) is None:\n self.logger.error('The config file should contain the password of the user in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'server_id', fallback=None) is None:\n self.logger.error('The config file should contain the server_id in mysql section.')\n sys.exit(1)\n\n if parser.get('mysql', 'tables', fallback=None) is not None:\n tables = [table.strip() for table in parser.get('mysql', 'tables').split(',')]\n for table in tables:\n if not parser.has_section(table):\n self.logger.error('The config file should contain a section about the table : %s' % table)\n exit(1)\n if parser.get(table, 'index_label', fallback=None) is None :\n self.logger.error('The config file should contain a table section with a index_label value.')\n exit(1)\n else:\n self.logger.error('The config file should contain a tables value with all the tables to replicate.')\n exit(1)", "def parse(self):\n raw_config_lines = self.load_config()\n self.config_lines_str = raw_config_lines\n self._create_cfg_line_objects()", "def build_config_parser(filename='GradientOneAuthConfig.txt'):\n cfg = ConfigParser(dict_type=dict)\n cfg.optionxform = str\n cfgfile = None\n try:\n cfgfile = find_file(filename)\n except IOError:\n raise ValueError(\"Could not find a {} file. Please download \"\n \"one for this machine.\".format(filename))\n try:\n cfg.read(cfgfile)\n except IOError:\n raise ValueError(\"Could not read the {} file. Please download a \"\n \"valid config file for this machine.\"\n .format(filename))\n return cfg", "def read_collector_config(cfg_file):\n hpifreqs = []\n linefreq = None\n if op.isfile(cfg_file):\n with open(cfg_file, 'r') as f:\n flines = f.read().splitlines()\n for line in flines:\n lit = line.split()\n if len(lit) > 1:\n if lit[0].find('hpiFreq') == 0:\n hpifreqs.append(float(lit[1]))\n elif lit[0].find('lineFreq') == 0:\n linefreq = float(lit[1])\n return linefreq, hpifreqs", "def parse(self):\n\t\tself.maincfg_values = self._load_static_file(self.cfg_file)\n\t\t\n\t\tself.cfg_files = self.get_cfg_files()\n\t\t\n\t\tself.resource_values = self.get_resources()\n\t\t\n\t\tself.timestamps = self.get_timestamps()\n\t\t\n\t\t## This loads everything into\n\t\tfor cfg_file in self.cfg_files:\n\t\t\tself._load_file(cfg_file)\n\n\t\tself._post_parse()", "def parsedbconfig(self):\n p = xml.parsers.expat.ParserCreate()\n p.StartElementHandler = start_element\n f=open(self.__dbconfig,'r')\n p.ParseFile(f)\n self.fillregistry()", "def parse_config (config_file, option):\n\top_config = open(config_file, \"r\")\n\tif option == \"blast\":\n\t\tfor line in op_config:\n\t\t\tif line.startswith(\"blast\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tdb = line[1]\n\t\t\t\tevalue = line[2]\n\t\t\t\treturn(db, evalue)\n\telif option == \"clustalw\":\n\t\tfor line in op_config:\n\t\t\tif line.startswith (\"clustalw\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tclustal_path = line[1]\n\t\t\t\treturn (clustal_path)\n\n\telif option == \"plotly\":\n\t\tfor line in op_config:\n\t\t\tif line.startswith(\"plotly\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tusername = line[1]\n\t\t\t\tapi_key = line[2]\n\t\t\t\treturn (username, api_key)", "def check_config(cfg):", "def read_cfg(file_path, account):\n d = {}\n parser = SafeConfigParser()\n\n try:\n parser.read(os.path.expanduser(file_path))\n for option in parser.options(account):\n # [1:-1] strips apostrophes 
wrapping the string\n d[option] = parser.get(account, option)[1:-1]\n return d\n except:\n print \"Config read failed\"\n return None", "def parse_config(cls, fname):\n with open(fname) as f:\n lines = [l.strip() for l in f.read().split('\\n') if l.strip()]\n\n comments = [l.replace('#', '').strip()\n for l in lines if l.startswith('#')]\n lines = [l for l in lines if not l.startswith('#')]\n\n # We use a simple state-machine approach to the parsing\n # in order to deal with multi-line sections.\n parsing = False\n keys = cls.config_guide.keys()\n vars = {var: [] for var in keys}\n for line in lines:\n if not parsing:\n k, csv = line.split(':')\n else:\n csv = line\n\n vars[k].extend([val.strip() for val in csv.split(',')])\n parsing = not line.endswith(';')\n if not parsing:\n vars[k][-1] = vars[k][-1][:-1] # remove semi-colon\n\n # Remove whitespace strings. These may have come from something like:\n # c: this, , that;\n for k in keys:\n vars[k] = [val for val in vars[k] if val] # already stripped\n\n return comments, vars", "def _read_config_file(self):\n wcec = 0\n deadline = 0\n jitter = 0\n init_freq = 0\n freqs_volt = {}\n freqs = []\n volts = []\n config_file_name = self._find_file('sim_simple.config')\n with open(config_file_name, 'rU') as f:\n lines = f.readlines()\n try:\n for freq in lines[0].split():\n freqs.append(float(freq))\n\n for volt in lines[1].split():\n volts.append(float(volt))\n\n for i in range(0, len(freqs)):\n freqs_volt[freqs[i]] = volts[i]\n\n data = lines[2].split()\n wcec = float(data[0])\n deadline = float(data[1])\n period = float(data[2])\n jitter = float(data[3])\n init_freq = float(data[4])\n except ValueError, IndexError:\n print 'Invalid data in config file'\n sys.exit(1)\n\n return wcec, deadline, period, jitter, init_freq, freqs_volt", "def makeConfig (self):\n for line in self.lines :\n ll = line.split ('=', 1)\n if len(ll) < 2 :\n print \"Error in parsing cfg label line: \" , line\n return None\n self.config[(ll[0]).strip()] = ((ll[1]).strip())" ]
[ "0.7338273", "0.65946966", "0.6553853", "0.6462676", "0.6443636", "0.6435526", "0.64032066", "0.6393902", "0.6384757", "0.6336177", "0.63237095", "0.6297659", "0.62877256", "0.62548345", "0.6230277", "0.6215732", "0.621042", "0.62055796", "0.6202254", "0.6191139", "0.618787", "0.6184455", "0.61823547", "0.6177381", "0.6169274", "0.61556166", "0.6152873", "0.6146492", "0.6128992", "0.61272717" ]
0.71468973
1
Find the length of a leb128 field in a bytestream
def len_leb128(self, bstream):
    start = bstream.offset
    length = 0
    r_byte = bstream.read(1)
    if r_byte != 0x0:
        length = 1
    while len(r_byte) == 1 and length < 5 and r_byte[0] & 0x80 != 0x0:
        length += 1
        r_byte = bstream.read(1)
    bstream.offset = start
    return length
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bit_length(self, ???):", "def length(self):\n return struct.unpack('<B', self.pkt.payload[2:3])[0]", "def length(self):\n return struct.unpack('<B', self.pkt.payload[2:3])[0]", "def length(self):\n return struct.unpack('<H', self.pkt.payload[2:4])[0]", "def length(self):\n return struct.unpack('<B', self.pkt.payload[1:2])[0]", "def network_byte_length(self) -> int:", "def length(self):\n return struct.unpack('<H', self.pkt.payload[6:8])[0]", "def decode_length(raw_bytes: bytes) -> Tuple[int, int]:\n length = size = 0\n while size < len(raw_bytes):\n elem = raw_bytes[size]\n length |= (elem & 0x7F) << (size * 7)\n size += 1\n if (elem & 0x80) == 0:\n break\n return length, size", "def getPacketLength(binaryString, startPos=0):\n if (len(binaryString) - startPos) < PRIMARY_HEADER_BYTE_SIZE:\n raise Error(\"packet header is too small\")\n return ((binaryString[startPos + 4] * 256) + binaryString[startPos + 5])", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def bit_length(self): # real signature unknown; restored from __doc__\n pass", "def decode_length(data: bytes) -> LengthValue:\n if data[0] == 0b11111111:\n # reserved\n raise NotImplementedError('This is a reserved case in X690')\n elif data[0] & 0b10000000 == 0:\n # definite short form\n output = int.from_bytes([data[0]], 'big')\n data = data[1:]\n elif data[0] ^ 0b10000000 == 0:\n # indefinite form\n raise NotImplementedError('Indefinite lenghts are not yet implemented!')\n else:\n # definite long form\n num_octets = int.from_bytes([data[0] ^ 0b10000000], 'big')\n value_octets = data[1:1+num_octets]\n output = int.from_bytes(value_octets, 'big')\n data = data[num_octets + 1:]\n return LengthValue(output, data)", "def get_uncompressed_size(self):\n if not self.is_compressed():\n return self.size\n elif self.data is None:\n raise ValueError('Data not read from record')\n else:\n return unpack('<L', self.data[:4])[0]", "def raw_data_length(self):\n return self.unpack_dword(0x4)", "def get_payload_length(packet):\n adaptation_field_len = TS.get_adaptation_field_length(packet)\n return 188 - 4 - adaptation_field_len", "def encoded_length(self):\n return self._encoded_length" ]
[ "0.6960859", "0.68176603", "0.68176603", "0.68099785", "0.67909276", "0.6746086", "0.6694288", "0.660601", "0.64682", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.6451434", "0.64108014", "0.6386752", "0.63694125", "0.6363689", "0.63069993" ]
0.7991842
0
Add the given Reply to this transaction store's list of responses. Also add to processedRequests if not added previously.
async def append(self, reply: Reply) \
        -> None:
    result = reply.result
    identifier = result.get(f.IDENTIFIER.nm)
    txnId = result.get(TXN_ID)
    logger.debug("Reply being sent {}".format(reply))
    if self._isNewTxn(identifier, reply, txnId):
        self.addToProcessedTxns(identifier, txnId, reply)
    if identifier not in self.responses:
        self.responses[identifier] = asyncio.Queue()
    await self.responses[identifier].put(reply)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addToProcessedTxns(self,\n identifier: str,\n txnId: str,\n reply: Reply) -> None:\n self.transactions[txnId] = reply\n if identifier not in self.processedRequests:\n self.processedRequests[identifier] = {}\n self.processedRequests[identifier][reply.reqId] = txnId", "def add_responses(self, response):\n self.responses = self.responses.union(set(response) if type(response) is not set else response)\n # return Post(self.title, self.timestamp, self.subject, self.content, self.resto,\n # self.responses.union(set(response) if type(response) is not set else response))", "def store_response(self, new_response):\n self.responses.append(new_response)", "def add_response(self, req, resp):\n if self._cache is None:\n return\n signature = sign(req.allocateQuotaRequest)\n with self._cache as c:\n now = self._timer()\n item = c.get(signature)\n if item is None:\n c[signature] = CachedItem(\n req, resp, self.service_name, now)\n else:\n # Update the cached item to reflect that it is updated\n item.last_check_time = now\n item.response = resp\n item.is_in_flight = False\n c[signature] = item", "def addResponse(self, response):\n if isinstance(response, PollResponse):\n self.responses.append(response)\n else:\n raise TypeError(\"addResponse requires a PollResponse object\")", "def queue_response(self, **kwargs):\n self.response_list.append(kwargs)\n self.semaphore.release()", "def add_orders(self, response_data):\n orders = response_data[self.DATA][self.DATA]\n for order in orders:\n self.orders.append(self.process_order_data(order))", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=6)\n\n self.request_buffer.process_reply(reply)\n self.assertEqual(len(self.request_buffer.requests), 5)", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=5)\n\n self.request_buffer.process_reply(reply)\n\n self.assertTrue(\n req1 in self.request_buffer.requests and\n req2 in self.request_buffer.requests and\n req3 in self.request_buffer.requests and\n req4 in self.request_buffer.requests and\n req5 not in self.request_buffer.requests\n )", "def _post_answer(self, answer):\n print(answer)\n self.messages_received.append(answer)", "def _post_answer(self, answer):\n print(answer)\n self.messages_received.append(answer)", "def add(self, answer):\n self._validate(answer)\n\n answer_to_add = answer.__dict__.copy()\n\n if self.exists(answer):\n raise ValueError(\"Answer instance already exists in store\")\n else:\n self.answers.append(answer_to_add)", "def _request_ended(self, reply):\n\n if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute):\n self.http_resources.append(HttpResource(reply, self.cache))", "def append(self, response):\n\n self.__responses.append(response)\n\n if len(self) == 1:\n self.__rtt_avg = response.time_elapsed\n self.__rtt_max = response.time_elapsed\n self.__rtt_min = 
response.time_elapsed\n else:\n # Calculate the total of time, add the new value and divide for the new number\n self.__rtt_avg = ((self.__rtt_avg * (len(self) - 1)) + response.time_elapsed) / len(self)\n if response.time_elapsed > self.__rtt_max:\n self.__rtt_max = response.time_elapsed\n if response.time_elapsed < self.__rtt_min:\n self.__rtt_min = response.time_elapsed\n\n self.__packets_lost = self.__packets_lost + (0 if response.success else 1 - self.__packets_lost) / len(self)\n\n if self.verbose:\n print(response, file=self.output)", "def add_autoresponse(self, **options):\n\n message = options['message']\n bot = options['bot']\n source = options['source']\n\n if message:\n options, phrase = bot.autoresponse_manager.parse_autoresponse_arguments(message)\n\n if options is False:\n bot.whisper(source.username, 'Invalid autoresponse')\n return False\n\n options['added_by'] = source.id\n options['edited_by'] = source.id\n\n autoresponse, new_autoresponse = bot.autoresponse_manager.create_autoresponse(phrase, **options)\n\n if new_autoresponse is True:\n bot.whisper(source.username, 'Added your autoresponse (ID: {autoresponse.id})'.format(autoresponse=autoresponse))\n AdminLogManager.post('Banphrase added', source, phrase)\n return True\n\n autoresponse.set(**options)\n autoresponse.data.set(edited_by=options['edited_by'])\n DBManager.session_add_expunge(autoresponse)\n bot.autoresponse_manager.commit()\n bot.whisper(source.username, 'Updated your autoresponse (ID: {autoresponse.id}) with ({what})'.format(autoresponse=autoresponse, what=', '.join([key for key in options if key != 'added_by'])))\n AdminLogManager.post('Banphrase edited', source, phrase)", "def handle_response(self, response):\n with self.lock:\n req_id, status, message = response\n if req_id in self.pending_requests: # request may have timed out\n self.pending_requests[req_id].set((status, message))", "def add_response(self, resp):\n if resp and not issubclass(resp, BaseResponse):\n raise TypeError(\"custom response must be subclass of `pre_request.BaseResponse`\")\n\n self.response = resp", "def addResults(self, results):\n if results is None or len(results) == 0:\n self._results = None\n else:\n self._results = results", "def add(self, items: Iterable[requests.LeaseRequest]) -> None:\n with self._add_remove_lock:\n for item in items:\n # Add the ack ID to the set of managed ack IDs, and increment\n # the size counter.\n if item.ack_id not in self._leased_messages:\n self._leased_messages[item.ack_id] = _LeasedMessage(\n sent_time=float(\"inf\"),\n size=item.byte_size,\n ordering_key=item.ordering_key,\n )\n self._bytes += item.byte_size\n else:\n _LOGGER.debug(\"Message %s is already lease managed\", item.ack_id)", "def respond(self, request_id, response):\n response['rdf:type'] = self.response_type\n response['response_to'] = uri(request_id)\n\n LOG.debug(\n 'Responding to request {0} with {1}.'.format(request_id, response))\n\n response_triples = []\n for key, values in response.iteritems():\n if not isinstance(values, list):\n values = [values]\n for value in values:\n response_triples.append(Triple(bnode('id'), key, value))\n\n self.sc.insert(response_triples)", "def _add_response(self, id):\n new_res = ResponseInfo()\n new_res._id = id\n self._responses.append(new_res)\n return new_res", "def add_or_update(self, answer):\n if self.exists(answer):\n self.update(answer)\n else:\n self.add(answer)", "def __try_add_reply(\n self, replies, current_reply, ig_users, footer,\n mention_author, force=False,\n ):\n idx = 0\n 
# the number of elements in current_reply that constitutes a reply\n # for a single user\n step = 1\n mention_tag = ''\n if mention_author:\n mention_tag = (\n Formatter.LINE_DELIM\n + Formatter.MENTIONER_FMT.format(author=mention_author)\n )\n\n logger.debug(mention_tag)\n\n full_reply = (\n Formatter.USER_SEPARATOR.join(filter(None, current_reply))\n + mention_tag\n + Formatter.LINE_DELIM\n + footer\n )\n while len(full_reply) >= Formatter.COMMENT_CHARACTER_LIMIT:\n # the total size of the current reply exceeds the maximum allowed\n # comment character length\n\n # truncate individual ig_user highlights from the end of the reply\n # until the reply is under the character limit\n idx -= step\n full_reply = (\n Formatter.USER_SEPARATOR.join(\n filter(None, current_reply[:idx])\n ) + mention_tag\n + Formatter.LINE_DELIM\n + footer\n )\n\n # don't de-sync the reply & ig_users list in case an overflow happens\n # when force==True (if this happens, the overflow will be dropped)\n # -- basically: don't call this function with force==True if there may\n # be overflow\n if force and idx == 0:\n # set the idx so that all of current_reply and ig_users is used\n idx = len(current_reply)\n\n if idx != 0:\n ig_idx = idx // step\n replies.append( (full_reply, ig_users[:ig_idx]) )\n # return the remainder\n # if force: ([], [])\n current_reply = current_reply[idx:]\n ig_users = ig_users[ig_idx:]\n\n return current_reply, ig_users", "def add_results(self, results):\n if self.replication_counter < self.replication_num:\n for metric in self.metrics:\n self.metric_final_results[metric].append(results[metric])\n\n self.replication_counter += 1\n else:\n raise Exception(\"The requested metric collection call of {}/{} exceeds the number of pre-defined replication\".format(self.replication_counter, self.replication_num))", "def replies(self):\r\n return Replies(self)", "def _uiClickAddResponse(self) -> None:\n\n response, okClicked = AddResponseDialog.getResponse(self.widget)\n if okClicked:\n self.model.add_response(response)", "def put_response(self, msg: Any) -> None:\n # redis.Connection.__del__ might call self.close at any time, which\n # will set self.responses to None. We assume this will happen\n # atomically, and the code below then protects us against this.\n responses = self.responses\n if responses:\n responses.put(msg)", "def add(self, results: Union[str, List[str]]) -> List[str]:\n all_matched_results = self._get_matched_results(results)\n added = []\n for result in all_matched_results:\n if result not in self.saved:\n self.saved.add(result)\n added.append(result)\n return added", "def __send_responses(self):\n # create a copy of the responses\n responses = self.__responses\n # for every response\n for response in responses:\n # send the response\n self.__send(response)\n # remove the response from the responses' list\n if response in self.__responses:\n self.__responses.remove(response)", "def click_add_response_button(self):\r\n self._find_within(\".add-response-btn\").first.click()\r\n EmptyPromise(\r\n lambda: self._find_within(\".discussion-reply-new textarea:focus\").present,\r\n \"Response field received focus\"\r\n ).fulfill()" ]
[ "0.6899966", "0.6166129", "0.61250067", "0.5979266", "0.57536274", "0.56638956", "0.5634618", "0.5630767", "0.54890007", "0.5479965", "0.5479965", "0.5436922", "0.5425434", "0.5344658", "0.5177299", "0.515309", "0.514785", "0.5144238", "0.51264435", "0.5090172", "0.5059836", "0.5049599", "0.50417787", "0.5035936", "0.5029241", "0.50236803", "0.50072104", "0.49925455", "0.49898335", "0.49860775" ]
0.8082104
0
If the client is not in `processedRequests`, or the requestId is not among that client's processed requests, and txnId is present, then it is a new reply
def _isNewTxn(self, identifier, reply, txnId) -> bool:
    return (identifier not in self.processedRequests or
            reply.reqId not in self.processedRequests[identifier]) and \
        txnId is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testReplyWhenRequestAlreadyExecuted(looper, nodeSet, client1, sent1):\n # Since view no is always zero in the current setup\n looper.run(eventually(checkSufficientRepliesRecvd,\n client1.inBox,\n sent1.reqId,\n 2,\n retryWait=.5,\n timeout=5))\n originalRequestResponsesLen = nodeCount * 2\n duplicateRequestRepliesLen = nodeCount # for a duplicate request we need to\n client1.nodestack._enqueueIntoAllRemotes(sent1, None)\n\n def chk():\n assertLength([response for response in client1.inBox\n if (response[0].get(f.RESULT.nm) and\n response[0][f.RESULT.nm][f.REQ_ID.nm] == sent1.reqId) or\n (response[0].get(OP_FIELD_NAME) == REQACK and\n response[0].get(f.REQ_ID.nm) == sent1.reqId)],\n originalRequestResponsesLen + duplicateRequestRepliesLen)\n\n looper.run(eventually(\n chk,\n retryWait=1,\n timeout=20))", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=6)\n\n self.request_buffer.process_reply(reply)\n self.assertEqual(len(self.request_buffer.requests), 5)", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=5)\n\n self.request_buffer.process_reply(reply)\n\n self.assertTrue(\n req1 in self.request_buffer.requests and\n req2 in self.request_buffer.requests and\n req3 in self.request_buffer.requests and\n req4 in self.request_buffer.requests and\n req5 not in self.request_buffer.requests\n )", "def addToProcessedTxns(self,\n identifier: str,\n txnId: str,\n reply: Reply) -> None:\n self.transactions[txnId] = reply\n if identifier not in self.processedRequests:\n self.processedRequests[identifier] = {}\n self.processedRequests[identifier][reply.reqId] = txnId", "def test_batch_by_transaction_id_response_handler_requested():\n # The completer does not have the requested batch with the transaction\n testResponder = TestResponder()\n\n before_message = network_pb2.GossipBatchByTransactionIdRequest(\n ids=[\"123\"],\n time_to_live=1)\n\n after_message = network_pb2.GossipBatchByTransactionIdRequest(\n ids=[\"123\"],\n time_to_live=0)\n\n testResponder.batch_by_txn_request_handler.handle(\n \"Connection_1\", before_message.SerializeToString())\n\n # If we cannot respond to the request, broadcast batch request and add\n # to pending request\n\n testResponder.gossip.clear()\n\n # Message should be dropped since the same message has already been\n # handled\n testResponder.batch_by_txn_request_handler.handle(\n \"Connection_2\", before_message.SerializeToString())\n\n message = network_pb2.GossipBatchByTransactionIdRequest(\n ids=[\"123\"],\n nonce=\"2\",\n time_to_live=1)\n testResponder.batch_by_txn_request_handler.handle(\n \"Connection_2\", message.SerializeToString())", "def test_handle_response_remove_request_from_pending(self):\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n uuid = [uuid for uuid in lookup.pending_requests.keys()][0]\n contact = 
lookup.shortlist[0]\n msg = Value(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal, self.target,\n 'value', time.time(), time.time() + 99999, self.version,\n PUBLIC_KEY, 'name', 'signature')\n response = asyncio.Future()\n response.set_result(msg)\n lookup._handle_response(uuid, contact, response)\n self.assertNotIn(uuid, lookup.pending_requests.keys())", "def process_request(self, request, client_address):\n self._request_queue.put((request, client_address))", "def handle_response(self, response):\n with self.lock:\n req_id, status, message = response\n if req_id in self.pending_requests: # request may have timed out\n self.pending_requests[req_id].set((status, message))", "async def append(self, reply: Reply) \\\n -> None:\n result = reply.result\n identifier = result.get(f.IDENTIFIER.nm)\n txnId = result.get(TXN_ID)\n logger.debug(\"Reply being sent {}\".format(reply))\n if self._isNewTxn(identifier, reply, txnId):\n self.addToProcessedTxns(identifier, txnId, reply)\n if identifier not in self.responses:\n self.responses[identifier] = asyncio.Queue()\n await self.responses[identifier].put(reply)", "def test_responder_batch_response_txn_handler():\n\n testResponder = TestResponder()\n\n transaction = transaction_pb2.Transaction(header_signature=\"123\")\n batch = batch_pb2.Batch(\n header_signature=\"abc\", transactions=[transaction])\n\n response_message = network_pb2.GossipBatchResponse(\n content=batch.SerializeToString())\n\n request_message = \\\n network_pb2.GossipBatchByTransactionIdRequest(\n ids=[\"123\"],\n time_to_live=1)\n\n # Send BatchByTransaciontIdRequest for txn \"123\" and add it to the\n # pending request cache\n testResponder.batch_request_handler.handle(\n \"Connection_2\", request_message.SerializeToString())\n\n # Send Batch Response that contains the batch that has txn \"123\"\n testResponder.batch_response_handler.handle(\n \"Connection_1\", (batch, response_message.SerializeToString()))", "def test_batch_by_transaction_id_response_handler():\n # The completer does not have the requested batch with the transaction\n testResponder = TestResponder()\n\n before_message = network_pb2.GossipBatchByTransactionIdRequest(\n ids=[\"123\"],\n nonce=\"1\",\n time_to_live=1)\n\n after_message = network_pb2.GossipBatchByTransactionIdRequest(\n ids=[\"123\"],\n nonce=\"1\",\n time_to_live=0)\n\n testResponder.batch_by_txn_request_handler.handle(\n \"Connection_1\", before_message.SerializeToString())\n\n # If we cannot respond to the request, broadcast batch request and add\n # to pending request\n # BatchByTransactionIdRequest\n message = network_pb2.GossipBatchByTransactionIdRequest(\n ids=[\"123\"],\n nonce=\"2\",\n time_to_live=1)\n transaction = transaction_pb2.Transaction(header_signature=\"123\")\n batch = batch_pb2.Batch(\n header_signature=\"abc\", transactions=[transaction])\n testResponder.completer.add_batch(batch)\n testResponder.batch_request_handler.handle(\n \"Connection_1\", message.SerializeToString())", "def jsonrpc_req_txn(self, txnid, addr):\n if ADD_NETWORK_DELAY:\n time.sleep(random.uniform(NETWORK_DELAY_MIN, NETWORK_DELAY_MAX))\n if VERBOSE:\n print('Received Request for txn {} from {}'.format(txnid, addr))\n if hex2b(txnid) in self.node.storage.invalid_txns:\n return None\n\n txn = self.node.storage.mempool[hex2b(txnid)]\n if txn is None:\n txnw = self.node.storage.db.get(hex2b(txnid))\n if txnw is None:\n txnw = self.node.storage.pend.get(hex2b(txnid))\n if txnw is None:\n if VERBOSE:\n print('Transaction {} 
not found!'.format(txnid))\n return 0\n txn = TxnWrapper.unserialize(SerializationBuffer(txnw)).txn\n\n txn = b2hex(txn.serialize().get_bytes())\n return txn", "def process_request(self, request, client_address):\n self.finish_request(request, client_address)", "def test_batch_by_id_responder_handler_requested():\n # The completer does not have the requested batch\n testResponder = TestResponder()\n\n before_message = network_pb2.GossipBatchByBatchIdRequest(\n id=\"abc\",\n nonce=\"1\",\n time_to_live=1)\n\n after_message = network_pb2.GossipBatchByBatchIdRequest(\n id=\"abc\",\n nonce=\"1\",\n time_to_live=0)\n testResponder.batch_request_handler.handle(\n \"Connection_1\", before_message.SerializeToString())\n # If we cannot respond to the request broadcast batch request and add\n # to pending request\n\n testResponder.gossip.clear()\n\n # Message should be dropped since the same message has already been\n # handled\n testResponder.batch_request_handler.handle(\n \"Connection_2\", before_message.SerializeToString())\n\n message = network_pb2.GossipBatchByBatchIdRequest(\n id=\"abc\",\n nonce=\"2\",\n time_to_live=1)\n\n testResponder.batch_request_handler.handle(\n \"Connection_2\", message.SerializeToString())", "def testReplyWhenRepliesFromAllNodesAreSame(looper, client1, wallet1):\n request = sendRandomRequest(wallet1, client1)\n looper.run(\n eventually(checkResponseRecvdFromNodes, client1,\n nodeCount, request.reqId,\n retryWait=1, timeout=20))\n checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)", "def _handle_requests(self):\n for request in self._requests[:]:\n self.logger.debug(\"Handling request: %r\", request)\n\n # an orphan request, client is not alive.\n if not request.server_request and not request.worker.is_alive:\n self.logger.warning(\"Client %r disconnected, request dropped\",\n request.worker.name)\n self._requests.remove(request)\n continue\n\n try:\n request_handler = self._get_request_handler(request)\n reply = request_handler(request)\n\n except _WaitingForResourceException as ex:\n self.logger.exception(str(ex))\n continue\n\n except Exception as ex:\n if isinstance(ex, ServerError):\n code = ex.ERROR_CODE\n content = ex.get_error_content()\n\n else:\n code = ServerError.ERROR_CODE\n content = str(ex)\n\n self.logger.exception(str(ex))\n reply = ErrorReply(code=code, content=content)\n\n reply.request_id = request.message.msg_id\n self._reactor.callFromThread(request.respond, reply)\n\n self._requests.remove(request)", "def would_retransmit(self):\n return not self.my_pending_requests.is_empty()", "def _dispatch_to_client_request(self):\n # Listen for client connection\n self._to_client_request.listen()\n\n while not self._exit_request:\n readable, _, _ = select([self._to_client_request], [], [self._to_client_request], 0.1)\n if readable:\n client_conn, client_addr = readable[0].accept()\n client_conn.setblocking(False)\n self._to_client_connections.append(client_conn)\n print(\"Sending replies to [\" + client_addr[0] + \", \" + str(client_addr[1]) + ']')", "def validate_reply(request, reply):\n assert isinstance(reply, dict) and 'id' in reply\n assert ('result' in reply) != ('error' in reply)\n assert reply['id'] == request['id'] or \\\n reply['id'] == '00' and 'error' in reply", "def process_request(self, request, client_address):\n\t\tself.finish_request(request, client_address)\n\t\tself.close_request(request)", "def process_request(self, request, client_address):\n self.executor.submit(self.process_request_thread, request, client_address)", 
"def handle_request(self, request):\n request.command.set_origin(self.other)\n\n # Keep track of object locks here.\n create_versions = request.command.get_new_object_versions()\n depends_on_version = request.command.get_dependencies()\n\n # Always answer old requests.\n previous_request = self.committed_commands.try_get(request.cid)\n if previous_request:\n if previous_request.is_same_command(request):\n\n # Invariant\n assert all(str(cv) in self.object_locks\n for cv in create_versions)\n\n # Re-send the response.\n logger.debug(\n f'(other:{self.other_address_str}) '\n f'Handle request that alerady has a response: '\n f'cid #{request.cid}.',\n )\n return previous_request.response\n else:\n # There is a conflict, and it will have to be resolved\n # TODO[issue 8]: How are conflicts meant to be resolved?\n # With only two participants we cannot tolerate errors.\n response = make_protocol_error(\n request, code=OffChainErrorCode.conflict)\n\n response.previous_command = previous_request.command\n logger.error(\n f'(other:{self.other_address_str}) '\n f'Conflicting requests for cid {request.cid}'\n )\n return response\n\n missing_deps, used_deps, locked_deps = self.get_dep_locks(request)\n # Check potential protocol errors and exit\n if missing_deps:\n # Some dependencies are missing but may become available later?\n response = make_protocol_error(\n request,\n code=OffChainErrorCode.wait,\n message=f'dependencies {\", \".join(missing_deps)} are missing',\n )\n return response\n\n # Note: if locked depedency exists and self is client, yield locks to server\n # (i.e. let this command take over conflict objects)\n if locked_deps and self.is_server():\n # The server requests take precedence, so make this wait.\n response = make_protocol_error(\n request,\n code=OffChainErrorCode.wait,\n message=f'dependencies {\", \".join(locked_deps)} are locked',\n )\n return response\n\n # Check potential command errors and apply to request\n if used_deps:\n response = make_command_error(\n request,\n code=OffChainErrorCode.used_dependencies,\n message=f'dependencies {\", \".join(used_deps)} were used',\n )\n\n else: # Everything looks good, try to check command's integrity\n try:\n command = request.command\n my_address = self.get_my_address()\n other_address = self.get_other_address()\n\n self.processor.check_command(\n my_address, other_address, command)\n\n response = make_success_response(request)\n except CommandValidationError as e:\n response = make_command_error(\n request,\n code=e.error_code,\n message=e.error_message)\n\n # Write back to storage\n request.response = response\n\n self.committed_commands[request.cid] = request\n self.register_dependencies(request)\n self.apply_response(request)\n\n return request.response", "def abort_request_already_accepted(reqID):\n req = get_ride_request(reqID)\n if req.status == \"Accepted\":\n msg=\"Ride Request Cannot be changed: already accpeted\"\n abort(HTTPStatus.FORBIDDEN, message=msg)", "def _send_broker_unaware_request(self, requestId, request):\n\n # Check if we've had a condition which indicates we might need to\n # re-resolve the IPs of our hosts\n if self._collect_hosts_d:\n if self._collect_hosts_d is True:\n # Lookup needed, but not yet started. Start it.\n self._collect_hosts_d = _collect_hosts(self._hosts)\n broker_list = yield self._collect_hosts_d\n self._collect_hosts_d = None\n if broker_list:\n self._update_brokers(broker_list, remove=True)\n else:\n # Lookup of all hosts returned no IPs. 
Log an error, setup\n # to retry lookup, and try to continue with the brokers we\n # already have...\n log.error('Failed to resolve hosts: %r', self._hosts)\n self._collect_hosts_d = True\n\n brokers = list(self.clients.values())\n # Randomly shuffle the brokers to distribute the load, but\n random.shuffle(brokers)\n # Prioritize connected brokers\n brokers.sort(reverse=True, key=lambda broker: broker.connected())\n for broker in brokers:\n try:\n log.debug('_sbur: sending request: %d to broker: %r',\n requestId, broker)\n d = self._make_request_to_broker(broker, requestId, request)\n resp = yield d\n returnValue(resp)\n except KafkaError as e:\n log.warning(\"Could not makeRequest id:%d [%r] to server %s:%i, \"\n \"trying next server. Err: %r\", requestId,\n request, broker.host, broker.port, e)\n\n # Anytime we fail a request to every broker, setup for a re-resolve\n self._collect_hosts_d = True\n raise KafkaUnavailableError(\n \"All servers (%r) failed to process request\" % brokers)", "def _GetNewRequests(self):\n new_requests = self._GetRequestsByState(self._REQUESTED)\n if new_requests:\n while self._MakeRequestId() == new_requests[-1]:\n pass\n for request_id in new_requests:\n self._TransitionRequest(request_id, self._REQUESTED, self._PENDING)\n return new_requests", "def deleteRequest( self, requestId ):\n cmd = \"DELETE FROM `ProxyDB_Requests` WHERE Id=%s\" % requestId\n return self._update( cmd )", "def process_request(self, net_id, request_id, processed):\n folder_path = \"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, net_id, request_id)\n request_submitted_marker = \"{0}request.submitted\".format(folder_path)\n request_processed_marker = \"{0}request.processed\".format(folder_path)\n request_returned_marker = \"{0}request.returned\".format(folder_path)\n request_voided_marker = \"{0}request.voided\".format(folder_path)\n request_submitted = path.exists(request_submitted_marker)\n\n if get_user_roles(current_user.net_id)[\"STFADM\"] and ((request_submitted and str_2_bool(processed)) or (not request_submitted and not str_2_bool(processed))):\n date_time = \"{0}\".format(datetime.now()).split()\n\n if path.exists(request_voided_marker):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has been voided. Please refresh the page.\"})\n elif path.exists(request_returned_marker):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has been returned. Please refresh the page.\"})\n elif path.exists(request_processed_marker) and str_2_bool(processed):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has already been processed. Please refresh the page.\"})\n elif not path.exists(request_processed_marker) and not str_2_bool(processed):\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request has already been unprocessed. 
Please refresh the page.\"})\n\n with open(\"{0}submission.json\".format(folder_path), mode=\"r\") as request_details_json:\n request_details = json.load(request_details_json)\n\n try:\n request_date = \"{0:02d}/{1:02d}/{2:04d}\".format(request_details[\"request_date\"][\"month\"],\n request_details[\"request_date\"][\"day\"],\n request_details[\"request_date\"][\"year\"])\n\n history_update = {\n \"date\": date_time[0],\n \"time\": date_time[1],\n \"action\": None,\n \"actor\": {\n \"first_name\": current_user.first_name,\n \"last_name\": current_user.last_name,\n \"email\": current_user.email,\n \"uta_id\": current_user.uta_id\n },\n \"metadata\": None\n }\n\n processed = str_2_bool(processed)\n if processed:\n remove(request_submitted_marker)\n optional_message = request.json.get(\"message\", \"\").strip()\n history_update[\"action\"] = \"Processed\"\n history_update[\"metadata\"] = {\n \"transaction_number\": request.json[\"transaction_number\"],\n \"message\": optional_message\n }\n\n with open(request_processed_marker, mode=\"w\") as processed_marker:\n processed_marker.write(\"/n\")\n\n if optional_message:\n optional_message_html = \"<br><br>Please see the attached message from {0} below:\" \\\n \"<br><blockquote style='border-left: 3px solid rgb(200, 200, 200); \" \\\n \"border-top-color: rgb(200, 200, 200); border-right-color: \" \\\n \"rgb(200, 200, 200); border-bottom-color: rgb(200, 200, 200); \" \\\n \"padding-left: 1ex; margin-left: 0.8ex; color: rgb(102, 102, 102);'>\" \\\n \"<div style='color: rgb(0, 0, 0);'>{1}</div>\" \\\n \"</blockquote>\".format(current_user.first_name, optional_message)\n optional_message = \"\\n\\nPlease see the attached message from {0} below:\" \\\n \"\\n{1}\".format(current_user.first_name, optional_message)\n else:\n optional_message_html = \"\"\n\n email_subject = \"Reimbursement Request Processed\"\n requester_payto_equal = request_details[\"requester\"][\"email\"].lower().strip() == request_details[\"pay_to\"][\"email\"].lower().strip()\n\n if requester_payto_equal:\n email_body = app_constants.EMAILS[\"process_request\"][\"text\"][processed][requester_payto_equal].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n current_user.first_name, current_user.email,\n request.json[\"transaction_number\"],\n optional_message, request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n email_body_html = app_constants.EMAILS[\"process_request\"][\"html\"][processed][requester_payto_equal].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n current_user.first_name, current_user.email,\n request.json[\"transaction_number\"],\n optional_message_html, request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n else:\n email_body = app_constants.EMAILS[\"process_request\"][\"text\"][processed][requester_payto_equal].format(\n request_details[\"pay_to\"][\"name\"],\n request_date, request_details[\"total_amount\"],\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n current_user.first_name, current_user.email,\n request.json[\"transaction_number\"],\n optional_message, 
request_details[\"short_description\"],\n request_details[\"requester\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n email_body_html = app_constants.EMAILS[\"process_request\"][\"html\"][processed][requester_payto_equal].format(\n request_details[\"pay_to\"][\"name\"],\n request_date, request_details[\"total_amount\"],\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n current_user.first_name, current_user.email,\n request.json[\"transaction_number\"],\n optional_message_html, request_details[\"short_description\"],\n request_details[\"requester\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n\n return_payload = {\"success\": True, \"type\": \"success\", \"message\": \"File marked as processed.\"}\n else:\n with open(request_submitted_marker, mode=\"w\") as submitted_marker:\n submitted_marker.write(\"/n\")\n\n user_message = request.json[\"message\"]\n history_update[\"action\"] = \"Unprocessed\"\n history_update[\"metadata\"] = {\n \"message\": user_message\n }\n\n email_subject = \"Reimbursement Request Unprocessed\"\n requester_payto_equal = request_details[\"requester\"][\"email\"].lower().strip() == request_details[\"pay_to\"][\"email\"].lower().strip()\n\n if requester_payto_equal:\n email_body = app_constants.EMAILS[\"process_request\"][\"text\"][processed][requester_payto_equal].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n current_user.first_name, current_user.email,\n user_message, request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n email_body_html = app_constants.EMAILS[\"process_request\"][\"html\"][processed][requester_payto_equal].format(\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_date, request_details[\"total_amount\"],\n current_user.first_name, current_user.email,\n user_message, request_details[\"short_description\"],\n request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n else:\n email_body = app_constants.EMAILS[\"process_request\"][\"text\"][processed][requester_payto_equal].format(\n request_details[\"pay_to\"][\"name\"],\n request_date, request_details[\"total_amount\"],\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n current_user.first_name, current_user.email,\n user_message, request_details[\"short_description\"],\n request_details[\"requester\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n email_body_html = app_constants.EMAILS[\"process_request\"][\"html\"][processed][requester_payto_equal].format(\n request_details[\"pay_to\"][\"name\"],\n request_date, request_details[\"total_amount\"],\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n current_user.first_name, current_user.email,\n user_message, request_details[\"short_description\"],\n request_details[\"requester\"][\"email\"],\n \"{0}mavapps/\".format(URL_FULL_PATH))\n\n if path.exists(request_processed_marker):\n remove(request_processed_marker)\n\n return_payload = {\"success\": True, \"type\": \"success\", \"message\": \"File marked as unprocessed.\"}\n\n with open(\"{0}submission.json\".format(folder_path), mode=\"w\") as request_details_json:\n 
request_details[\"history\"].append(history_update)\n json.dump(request_details, request_details_json)\n\n if SRV != \"prod\":\n emails = self.__TEST_EMAILS__\n else:\n emails = [[\"{0} {1}\".format(current_user.first_name, current_user.last_name), current_user.email],\n [\"{0} {1}\".format(request_details[\"requester\"][\"first_name\"], request_details[\"requester\"][\"last_name\"]), request_details[\"requester\"][\"email\"]]] \\\n + self.__PROD_EMAILS__\n if not requester_payto_equal:\n emails.append([\"{0}\".format(request_details[\"pay_to\"][\"name\"]), request_details[\"pay_to\"][\"email\"]])\n\n self.mailer.send_mail(emails, email_subject, email_body, email_body_html, from_name=\"CSE Reimbursement App\")\n\n return jsonify(return_payload)\n\n except Exception as e:\n print(e)\n\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"Oops! Something went wrong, contact the \"\n \"administrator if the problem persists.\"})\n return abort(403)", "def _process_json_rpc_message(self, msg, msg_id):\n future = self._pending_requests.get(msg_id, None)\n if future:\n error = msg.get('error', None)\n result = msg.get('result', None)\n if error:\n future.set_result(error)\n else:\n future.set_result(result)\n else:\n self._logger.error(\n \"Message received without a matching pending request! '{}'\".format(msg))", "def send_next_request(self, reqs, response):\n\n req = reqs.pop(0)\n new_meta = response.meta.copy()\n if reqs:\n new_meta[\"reqs\"] = reqs\n return req.replace(meta=new_meta)", "def send_next_request(self, reqs, response):\n\n req = reqs.pop(0)\n new_meta = response.meta.copy()\n if reqs:\n new_meta[\"reqs\"] = reqs\n return req.replace(meta=new_meta)" ]
[ "0.6786671", "0.64508814", "0.64042187", "0.60409987", "0.5892617", "0.55976886", "0.5570875", "0.54957646", "0.54149187", "0.5410394", "0.5357963", "0.5327926", "0.5290472", "0.5285692", "0.51636094", "0.5159471", "0.5154646", "0.5153234", "0.51339537", "0.5120444", "0.5117518", "0.51102996", "0.5096407", "0.50853735", "0.5084513", "0.5046979", "0.50410676", "0.50146896", "0.49668288", "0.49668288" ]
0.66603124
1
Prints layer-by-layer inference times. Good for profiling which ops are most costly in your model.
def print_stats(exec_net, input_data, n_channels, batch_size, input_blob, out_blob, args):
    # Start sync inference
    print("Starting inference ({} iterations)".format(args.number_iter))
    infer_time = []
    for i in range(args.number_iter):
        input_data_transposed_1 = input_data[0:batch_size].transpose(0, 3, 1, 2)
        t0 = time.time()
        res = exec_net.infer(inputs={input_blob: input_data_transposed_1[:, :n_channels]})
        infer_time.append((time.time() - t0) * 1000)
    average_inference = np.average(np.asarray(infer_time))
    print("Average running time of one batch: {:.5f} ms".format(average_inference))
    print("Images per second = {:.3f}".format(batch_size * 1000.0 / average_inference))
    perf_counts = exec_net.requests[0].get_perf_counts()
    log.info("Performance counters:")
    log.info("{:<70} {:<15} {:<15} {:<15} {:<10}".format("name", "layer_type", "exec_type", "status", "real_time, us"))
    for layer, stats in perf_counts.items():
        log.info("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats["layer_type"], stats["exec_type"], stats["status"], stats["real_time"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_layer_times(filename, only_model = -1):\n results = load_events.load_values(\n filename,\n event_names=['minibatch_time', 'objective_evaluation_time', 'objective_differentiation_time'],\n layer_event_names=['fp_time', 'bp_time', 'update_time', 'imcomm_time', 'opt_time'],\n model=-1)\n for model in results.keys():\n if model != only_model and only_model != -1:\n continue\n print('Model {0}:'.format(model))\n fp_tot = 0.0\n bp_tot = 0.0\n update_tot = 0.0\n imcomm_tot = 0.0\n opt_tot = 0.0\n for layer in results[model]['fp_time'].keys():\n fp_mean = np.mean(results[model]['fp_time'][layer])\n l_fp_tot = np.sum(results[model]['fp_time'][layer])\n bp_mean = np.mean(results[model]['bp_time'][layer])\n l_bp_tot = np.sum(results[model]['bp_time'][layer])\n update_mean = np.mean(results[model]['update_time'][layer])\n l_update_tot = np.sum(results[model]['update_time'][layer])\n imcomm_mean = 0.0\n l_imcomm_tot = 0.0\n if 'imcomm_time' in results[model] and layer in results[model]['imcomm_time']:\n imcomm_mean = np.mean(results[model]['imcomm_time'][layer])\n l_imcomm_tot = np.sum(results[model]['imcomm_time'][layer])\n opt_mean = 0.0\n l_opt_tot = 0.0\n if 'opt_time' in results[model] and layer in results[model]['opt_time']:\n opt_mean = np.mean(results[model]['opt_time'][layer])\n l_opt_tot = np.sum(results[model]['opt_time'][layer])\n fp_tot += l_fp_tot\n bp_tot += l_bp_tot\n update_tot += l_update_tot\n imcomm_tot += l_imcomm_tot\n opt_tot += l_opt_tot\n portion = imcomm_mean / (fp_mean + bp_mean + update_mean + imcomm_mean + opt_mean) * 100\n print('Layer {0}:\\tfp={1:<10.4}\\tbp={2:<10.4}\\tupdate={3:<10.4}\\topt={4:<10.4}\\timcomm={5:<10.4}\\tportion={6:.4}%'.format(\n layer, fp_mean, bp_mean, update_mean, opt_mean, imcomm_mean, portion))\n print(' '*len('layer {0}'.format(layer)) +\n ':\\tfp={0:<10.4}\\tbp={1:<10.4}\\tupdate={2:<10.4}\\topt={3:<10.4}\\timcomm={4:<10.4}\\tportion={5:.4}%'.format(\n l_fp_tot, l_bp_tot, l_update_tot, l_opt_tot, l_imcomm_tot,\n l_imcomm_tot / (l_fp_tot + l_bp_tot + l_update_tot + l_opt_tot + l_imcomm_tot) * 100))\n print('Total: fp={0:.4} bp={1:.4} update={2:.4} opt={3:.4} imcomm={4:.4} portion={5:.4}%'.format(\n fp_tot, bp_tot, update_tot, opt_tot, imcomm_tot,\n imcomm_tot / (fp_tot + bp_tot + update_tot + opt_tot + imcomm_tot) * 100))\n print('mbavg={0:.4} mbtot={1:.6} objvalavg={2:.4} objvaltot={3:.6} objgradavg={4:.4} objgradtot={5:.6}'.format(\n np.mean(results[model]['minibatch_time']), np.sum(results[model]['minibatch_time']),\n np.mean(results[model]['objective_evaluation_time']),\n np.sum(results[model]['objective_evaluation_time']),\n np.mean(results[model]['objective_differentiation_time']),\n np.sum(results[model]['objective_differentiation_time'])))", "def summary(self, verbose=False):\n for i, layer in enumerate(self._layers):\n print('%d: %s' % (i, str(layer)))\n if verbose:\n print('weights:', layer.get_weights())\n if layer._use_bias:\n print('bias:', layer._bias)\n print()", "def profile(self, layer, num_iter=50, num_warmup=10, direction='forward'):\n return TimeMeasure()", "def summary(self):\n for i,layer in enumerate(self.chain):\n x = Input([2])\n y = layer.forward(x)\n Model(x,y,name=f'layer_{i}_summary').summary()", "def print_summary_(fct_name, compile_time, fct_call_time, fct_call,\r\n apply_time, apply_cimpl, message, variable_shape,\r\n local_time, other_time,\r\n n_apply_to_print=config.ProfileMode.n_apply_to_print,\r\n n_ops_to_print=config.ProfileMode.n_ops_to_print,\r\n print_apply=True,\r\n 
min_memory_size=config.ProfileMode.min_memory_size,\r\n ):\r\n\r\n print \"ProfileMode is deprecated! Use the new profiler.\"\r\n print \" The Theano flags to enable it ise: profile=True\"\r\n print \" The Theano flags for the memory profile to it is: profile_memory=True\"\r\n\r\n total_time = time.time() - import_time\r\n total_fct_time = sum(fct_call_time.values())\r\n total_fct_call = sum(fct_call.values())\r\n unknown_time = total_time - total_fct_time - compile_time\r\n overhead_time = total_fct_time - local_time\r\n if total_fct_time > 0:\r\n time_pr_in_fct = local_time / total_fct_time * 100\r\n overhead_time_pourcent_fct_time = (overhead_time / total_fct_time *\r\n 100)\r\n time_per_call = total_fct_time / total_fct_call\r\n else:\r\n time_pr_in_fct = 0\r\n overhead_time_pourcent_fct_time = 0\r\n time_per_call = 0\r\n\r\n print\r\n print 'ProfileMode.%s(%s)' % (fct_name,message)\r\n print '---------------------------'\r\n print\r\n print 'Time since import %.3fs'%(total_time)\r\n print 'Theano compile time: %.3fs (%.1f%% since import)'%(compile_time, compile_time/total_time*100)\r\n print ' Optimization time: %.3fs'%(other_time['optimizer_time'])\r\n print ' Linker time: %.3fs'%(other_time['linker_time'])\r\n print 'Theano fct call %.3fs (%.1f%% since import)'%(total_fct_time, total_fct_time/total_time*100)\r\n print ' Theano Op time %.3fs %.1f%%(since import) %.1f%%(of fct call)'% (\r\n local_time, local_time/total_time*100, time_pr_in_fct)\r\n print ' Theano function overhead in ProfileMode %.3fs %.1f%%(since import) %.1f%%(of fct call)'% (\r\n overhead_time, overhead_time/total_time*100, overhead_time_pourcent_fct_time)\r\n print '%i Theano fct call, %.3fs per call'%(total_fct_call, time_per_call)\r\n print 'Rest of the time since import %.3fs %.1f%%'%(unknown_time, unknown_time/total_time*100)\r\n\r\n print\r\n print 'Theano fct summary:'\r\n print '<% total fct time> <total time> <time per call> <nb call> <fct name>'\r\n for key in fct_call.keys():\r\n if fct_call[key]>0:\r\n print ' %4.1f%% %.3fs %.2es %d %s'%(fct_call_time[key]/total_fct_time*100 ,fct_call_time[key],\r\n fct_call_time[key]/fct_call[key], fct_call[key], key.name)\r\n else:\r\n print ' NOT CALLED',key.name\r\n\r\n\r\n # Compute stats per op.\r\n op_time = {}\r\n op_call = {}\r\n op_apply = {}\r\n op_cimpl = {}\r\n sop_apply = {}\r\n for (i,a),t in apply_time.items():\r\n op=a.op\r\n op_time.setdefault(op,0)\r\n op_call.setdefault(op,0)\r\n op_apply.setdefault(op,0)\r\n sop_apply.setdefault(type(a.op),0)\r\n op_time[op]+=t\r\n nb_call = [v for k,v in fct_call.items() if k.maker.fgraph is a.fgraph][0]\r\n op_cimpl.setdefault(a.op, True)\r\n op_cimpl[a.op] = op_cimpl[a.op] and apply_cimpl.get(a, False)\r\n if t==0:\r\n assert nb_call == 0, nb_call\r\n else:\r\n op_call[op] += nb_call\r\n op_apply[op] += 1\r\n sop_apply[type(a.op)] += 1\r\n\r\n # Compute stats per op class\r\n sop_time={}\r\n sop_call={}\r\n sop_op = {}\r\n sop_cimpl={} #map each op class to Bool. 
True iff all applies were done in c.\r\n for a,t in op_time.items():\r\n typ = type(a)\r\n sop_time.setdefault(typ,0)\r\n sop_time[typ]+=t\r\n sop_op.setdefault(typ,0)\r\n sop_op[typ]+=1\r\n sop_cimpl.setdefault(typ,True)\r\n sop_cimpl[typ]=sop_cimpl[typ] and op_cimpl.get(a, False)\r\n sop_call[typ]=sop_call.get(typ,0)+op_call[a]\r\n\r\n\r\n # Print the summary per op class.\r\n print\r\n print 'Single Op-wise summary:'\r\n print '<% of local_time spent on this kind of Op> <cumulative %> <self seconds> <cumulative seconds> <time per call> [*] <nb_call> <nb_op> <nb_apply> <Op name>'\r\n sotimes = [(t*100/local_time, t, a, sop_cimpl[a], sop_call[a], sop_op[a], sop_apply[a]) for a, t in sop_time.items()]\r\n sotimes.sort()\r\n sotimes.reverse()\r\n tot=0\r\n for f,t,a,ci, nb_call, nb_op, nb_apply in sotimes[:n_ops_to_print]:\r\n if nb_call == 0:\r\n assert t == 0\r\n continue\r\n tot+=t\r\n ftot=tot*100/local_time\r\n if ci:\r\n msg = '*'\r\n else:\r\n msg = ' '\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d %2d %s' % (f, ftot, t, tot, t/nb_call, msg, nb_call, nb_op, nb_apply, a)\r\n print ' ... (remaining %i single Op account for %.2f%%(%.2fs) of the runtime)'\\\r\n %(max(0, len(sotimes)-n_ops_to_print),\r\n sum(soinfo[0] for soinfo in sotimes[n_ops_to_print:]),\r\n sum(soinfo[1] for soinfo in sotimes[n_ops_to_print:]))\r\n\r\n print '(*) Op is running a c implementation'\r\n\r\n\r\n # The summary per op\r\n op_flops = {}\r\n for a,t in op_time.items():\r\n if hasattr(a,'flops'):\r\n op_flops[a]=a.flops*op_call[a]/t/1e6\r\n flops_msg=''\r\n if op_flops:\r\n flops_msg=' <MFlops/s>'\r\n print '\\nHACK WARNING: we print the flops for some OP, but the logic don\\'t always work. You need to know the internal of Theano to make it work correctly. Otherwise don\\'t use!'\r\n print\r\n print 'Op-wise summary:'\r\n print '<%% of local_time spent on this kind of Op> <cumulative %%> <self seconds> <cumulative seconds> <time per call> [*] %s <nb_call> <nb apply> <Op name>'%(flops_msg)\r\n\r\n otimes = [(t*100/local_time, t, a, op_cimpl.get(a, 0), op_call.get(a, 0), op_apply.get(a,0))\r\n for a, t in op_time.items()]\r\n otimes.sort()\r\n otimes.reverse()\r\n tot=0\r\n for f,t,a,ci,nb_call,nb_apply in otimes[:n_ops_to_print]:\r\n if nb_call == 0:\r\n assert t == 0\r\n continue\r\n tot+=t\r\n ftot=tot*100/local_time\r\n if ci:\r\n msg = '*'\r\n else:\r\n msg = ' '\r\n if op_flops:\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %7.1f %5d %2d %s' % (f, ftot, t, tot, t/nb_call, msg, op_flops.get(a,-1), nb_call, nb_apply, a)\r\n else:\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d %s' % (f, ftot, t, tot, t/nb_call, msg, nb_call, nb_apply, a)\r\n print ' ... 
(remaining %i Op account for %6.2f%%(%.2fs) of the runtime)'\\\r\n %(max(0, len(otimes)-n_ops_to_print),\r\n sum(f for f, t, a, ci, nb_call, nb_op in otimes[n_ops_to_print:]),\r\n sum(t for f, t, a, ci, nb_call, nb_op in otimes[n_ops_to_print:]))\r\n print '(*) Op is running a c implementation'\r\n\r\n\r\n if print_apply:\r\n print\r\n print 'Apply-wise summary:'\r\n print '<% of local_time spent at this position> <cumulative %%> <apply time> <cumulative seconds> <time per call> [*] <nb_call> <Apply position> <Apply Op name>'\r\n atimes = [(t*100/local_time, t, a, [v for k,v in fct_call.items() if k.maker.fgraph is a[1].fgraph][0]) for a, t in apply_time.items()]\r\n atimes.sort()\r\n atimes.reverse()\r\n tot=0\r\n for f,t,a,nb_call in atimes[:n_apply_to_print]:\r\n tot+=t\r\n ftot=tot*100/local_time\r\n if nb_call==0:\r\n continue\r\n if apply_cimpl.get(a[1], False):\r\n msg = '*'\r\n else:\r\n msg = ' '\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %i %2i %s' % (\r\n f, ftot, t, tot, t/nb_call, msg, nb_call, a[0], str(a[1]))\r\n print ' ... (remaining %i Apply instances account for %.2f%%(%.2fs) of the runtime)'\\\r\n %(max(0, len(atimes)-n_apply_to_print),\r\n sum(f for f, t, a, nb_call in atimes[n_apply_to_print:]),\r\n sum(t for f, t, a, nb_call in atimes[n_apply_to_print:]))\r\n print '(*) Op is running a c implementation'\r\n for printer in profiler_printers:\r\n printer(fct_name, compile_time, fct_call_time, fct_call,\r\n apply_time, apply_cimpl, message, variable_shape,\r\n other_time)\r\n\r\n if not variable_shape:\r\n print \"\"\"\\nProfile of Theano intermediate memory disabled.\r\n To enabled, put the Theano flag ProfileMode.profile_memory to True.\"\"\"\r\n else:\r\n print \"\"\"\r\n The memory profile in ProfileMode is removed!\r\n Use the new profiler. 
Use the Theano flags\r\n profile=True,profile_memory=True to enable it.\"\"\"\r\n\r\n print\r\n print \"\"\"Here are tips to potentially make your code run faster\r\n(if you think of new ones, suggest them on the mailing list).\r\nTest them first, as they are not guaranteed to always provide a speedup.\"\"\"\r\n from theano import tensor as T\r\n from theano.tensor.raw_random import RandomFunction\r\n import theano\r\n import theano.scalar as scal\r\n scalar_op_amdlibm_no_speed_up = [scal.LT, scal.GT, scal.LE, scal.GE,\r\n scal.EQ, scal.NEQ, scal.InRange,\r\n scal.Switch, scal.OR, scal.XOR,\r\n scal.AND, scal.Invert, scal.Maximum,\r\n scal.Minimum, scal.Add, scal.Mul,\r\n scal.Sub, scal.TrueDiv, scal.IntDiv,\r\n scal.Clip, scal.Second, scal.Identity,\r\n scal.Cast, scal.Sgn, scal.Neg,\r\n scal.Inv, scal.Sqr]\r\n scalar_op_amdlibm_speed_up = [scal.Mod, scal.Pow, scal.Ceil,\r\n scal.Floor, scal.RoundHalfToEven,\r\n scal.RoundHalfAwayFromZero, scal.Log,\r\n scal.Log2, scal.Log10, scal.Log1p,\r\n scal.Exp, scal.Sqrt, scal.Abs, scal.Cos,\r\n scal.Sin, scal.Tan, scal.Tanh,\r\n scal.Cosh, scal.Sinh,\r\n T.nnet.sigm.ScalarSigmoid,\r\n T.nnet.sigm.ScalarSoftplus]\r\n # Abs, Mod in float{32,64} only\r\n\r\n def get_scalar_ops(s):\r\n if isinstance(s, theano.scalar.Composite):\r\n l = []\r\n for node in s.fgraph.toposort():\r\n l += get_scalar_ops(node.op)\r\n return l\r\n else:\r\n return [s]\r\n\r\n def list_scalar_op(op):\r\n if isinstance(op.scalar_op, theano.scalar.Composite):\r\n return get_scalar_ops(op.scalar_op)\r\n else:\r\n return [op.scalar_op]\r\n\r\n def amdlibm_speed_up(op):\r\n if not isinstance(op, T.Elemwise):\r\n return False\r\n else:\r\n l = list_scalar_op(op)\r\n for s_op in l:\r\n if s_op.__class__ in scalar_op_amdlibm_speed_up:\r\n return True\r\n elif s_op.__class__ not in scalar_op_amdlibm_no_speed_up:\r\n print \"We don't know if amdlibm will accelerate this scalar op.\", s_op\r\n return False\r\n\r\n def exp_float32_op(op):\r\n if not isinstance(op, T.Elemwise):\r\n return False\r\n else:\r\n l = list_scalar_op(op)\r\n return any([s_op.__class__ in [scal.Exp] for s_op in l])\r\n\r\n printed_tip = False\r\n #tip 1\r\n if config.floatX == 'float64':\r\n print \" - Try the Theano flag floatX=float32\"\r\n printed_tip = True\r\n\r\n #tip 2\r\n if not config.lib.amdlibm and any([amdlibm_speed_up(a.op) for i, a\r\n in apply_time]):\r\n print \" - Try installing amdlibm and set the Theano flag lib.amdlibm=True. This speeds up only some Elemwise operation.\"\r\n printed_tip = True\r\n\r\n #tip 3\r\n if not config.lib.amdlibm and any([exp_float32_op(a.op) and\r\n a.inputs[0].dtype == 'float32'\r\n for i, a in apply_time]):\r\n print (\" - With the default gcc libm, exp in float32 is slower \"\r\n \"than in float64! Try Theano flag floatX=float64, or \"\r\n \"install amdlibm and set the theano flags lib.amdlibm=True\")\r\n printed_tip = True\r\n\r\n #tip 4\r\n for a, t in apply_time.iteritems():\r\n node = a[1]\r\n if (isinstance(node.op, T.Dot) and\r\n all([len(i.type.broadcastable) == 2 for i in node.inputs])):\r\n print (\" - You have a dot operation that was not optimized to\"\r\n \" dot22 (which is faster). Make sure the inputs are \"\r\n \"float32 or float64, and are the same for both inputs. 
\"\r\n \"Currently they are: %s\" %\r\n [i.type for i in node.inputs])\r\n printed_tip = True\r\n\r\n #tip 5\r\n for a, t in apply_time.iteritems():\r\n node = a[1]\r\n if isinstance(node.op, RandomFunction):\r\n printed_tip = True\r\n print (\" - Replace the default random number generator by \"\r\n \"'from theano.sandbox.rng_mrg import MRG_RandomStreams \"\r\n \"as RandomStreams', as this is is faster. It is still \"\r\n \"experimental, but seems to work correctly.\")\r\n if config.device.startswith(\"gpu\"):\r\n print (\" - MRG_RandomStreams is the only random number\"\r\n \" generator supported on the GPU.\")\r\n break\r\n\r\n if not printed_tip:\r\n print \" Sorry, no tip for today.\"", "def logging(self, function):\n avg_nms_time_per_step = sum(self.nms_times)/len(self.nms_times)\n avg_total_time_per_step = sum(self.total_times)/len(self.total_times)\n\n avg_min_latency = [x[0] for x in self.inference_times]\n avg_max_latency = [x[1] for x in self.inference_times]\n avg_latency = [x[2] for x in self.inference_times]\n\n function(\"Inference stats: image size {}x{}, batches per step {}, batch size {}, {} steps\".format(\n self.cfg.model.image_size, self.cfg.model.image_size, self.cfg.ipuopts.batches_per_step, self.cfg.model.micro_batch_size, len(self.total_times)\n ))\n function(\"--------------------------------------------------\")\n function(\"Inference\")\n function(\"Average Min Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_min_latency)/len(self.inference_times)))\n function(\"Average Max Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_max_latency)/len(self.inference_times)))\n function(\"Average Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_latency)/len(self.inference_times)))\n function(\"Average Inference Throughput: {:.3f} img/s\".format(sum(self.inference_throughputs)/len(self.inference_throughputs)))\n function(\"--------------------------------------------------\")\n # TODO remove the NMS and end-to-end time report once NMS is on device\n function(\"End-to-end\")\n function(\"Average NMS Latency per Batch: {:.3f} ms\".format(1000 * avg_nms_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"Average End-to-end Latency per Batch: {:.3f} ms\".format(1000 * avg_total_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"End-to-end Throughput: {:.3f} img/s\".format(sum(self.total_throughputs)/len(self.total_throughputs)))\n function(\"==================================================\")\n\n if self.cfg.eval.metrics:\n self.compute_and_print_eval_metrics()", "def print_summary_(fct_name, compile_time, fct_call_time, fct_call,\n apply_time, apply_cimpl, message, variable_shape,\n local_time, other_time,\n n_apply_to_print=config.ProfileMode.n_apply_to_print,\n n_ops_to_print=config.ProfileMode.n_ops_to_print,\n print_apply=True,\n min_memory_size=config.ProfileMode.min_memory_size,\n ):\n\n print(\"ProfileMode is deprecated! 
Use the new profiler.\")\n print(\" The Theano flags to enable it ise: profile=True\")\n print(\" The Theano flags for the memory profile to it is: \"\n \"profile_memory=True\")\n\n total_time = time.time() - import_time\n total_fct_time = sum(fct_call_time.values())\n total_fct_call = sum(fct_call.values())\n unknown_time = total_time - total_fct_time - compile_time\n overhead_time = total_fct_time - local_time\n if total_fct_time > 0:\n time_pr_in_fct = local_time / total_fct_time * 100\n overhead_time_pourcent_fct_time = (overhead_time / total_fct_time *\n 100)\n time_per_call = total_fct_time / total_fct_call\n else:\n time_pr_in_fct = 0\n overhead_time_pourcent_fct_time = 0\n time_per_call = 0\n\n print()\n print('ProfileMode.%s(%s)' % (fct_name, message))\n print('---------------------------')\n print()\n print('Time since import %.3fs' % (total_time))\n print('Theano compile time: %.3fs (%.1f%% since import)' %\n (compile_time, compile_time / total_time * 100))\n print(' Optimization time: %.3fs' % (other_time['optimizer_time']))\n print(' Linker time: %.3fs' % (other_time['linker_time']))\n print('Theano fct call %.3fs (%.1f%% since import)' %\n (total_fct_time, total_fct_time / total_time * 100))\n print(' Theano Op time %.3fs %.1f%%(since import) %.1f%%'\n '(of fct call)' % (local_time, local_time / total_time * 100,\n time_pr_in_fct))\n print(' Theano function overhead in ProfileMode %.3fs %.1f%%'\n '(since import) %.1f%%(of fct call)' % (\n overhead_time, overhead_time / total_time * 100,\n overhead_time_pourcent_fct_time))\n print('%i Theano fct call, %.3fs per call' %\n (total_fct_call, time_per_call))\n print('Rest of the time since import %.3fs %.1f%%' %\n (unknown_time, unknown_time / total_time * 100))\n\n print()\n print('Theano fct summary:')\n print('<% total fct time> <total time> <time per call> <nb call> '\n '<fct name>')\n for key in fct_call:\n if fct_call[key] > 0:\n print(' %4.1f%% %.3fs %.2es %d %s' %\n (fct_call_time[key] / total_fct_time * 100,\n fct_call_time[key],\n fct_call_time[key] / fct_call[key],\n fct_call[key],\n key.name))\n else:\n print(' NOT CALLED', key.name)\n\n # Compute stats per op.\n op_time = {}\n op_call = {}\n op_apply = {}\n op_cimpl = {}\n sop_apply = {}\n for (i, a), t in iteritems(apply_time):\n op = a.op\n op_time.setdefault(op, 0)\n op_call.setdefault(op, 0)\n op_apply.setdefault(op, 0)\n sop_apply.setdefault(type(a.op), 0)\n op_time[op] += t\n nb_call = [v for k, v in iteritems(fct_call)\n if k.maker.fgraph is a.fgraph][0]\n op_cimpl.setdefault(a.op, True)\n op_cimpl[a.op] = op_cimpl[a.op] and apply_cimpl.get(a, False)\n if t == 0:\n assert nb_call == 0, nb_call\n else:\n op_call[op] += nb_call\n op_apply[op] += 1\n sop_apply[type(a.op)] += 1\n\n # Compute stats per op class\n sop_time = {}\n sop_call = {}\n sop_op = {}\n # map each op class to Bool. 
True iff all applies were done in c.\n sop_cimpl = {}\n for a, t in iteritems(op_time):\n typ = type(a)\n sop_time.setdefault(typ, 0)\n sop_time[typ] += t\n sop_op.setdefault(typ, 0)\n sop_op[typ] += 1\n sop_cimpl.setdefault(typ, True)\n sop_cimpl[typ] = sop_cimpl[typ] and op_cimpl.get(a, False)\n sop_call[typ] = sop_call.get(typ, 0) + op_call[a]\n\n # Print the summary per op class.\n print()\n print('Single Op-wise summary:')\n print('<% of local_time spent on this kind of Op> <cumulative %> '\n '<self seconds> <cumulative seconds> <time per call> [*] '\n '<nb_call> <nb_op> <nb_apply> <Op name>')\n sotimes = [(t * 100 / local_time, t, a, sop_cimpl[a], sop_call[a],\n sop_op[a], sop_apply[a]) for a, t in iteritems(sop_time)]\n sotimes.sort()\n sotimes.reverse()\n tot = 0\n for f, t, a, ci, nb_call, nb_op, nb_apply in sotimes[:n_ops_to_print]:\n if nb_call == 0:\n assert t == 0\n continue\n tot += t\n ftot = tot * 100 / local_time\n if ci:\n msg = '*'\n else:\n msg = ' '\n print(' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d '\n '%2d %s' % (f, ftot, t, tot, t / nb_call, msg, nb_call,\n nb_op, nb_apply, a))\n print(' ... (remaining %i single Op account for %.2f%%(%.2fs) of '\n 'the runtime)' %\n (max(0, len(sotimes) - n_ops_to_print),\n sum(soinfo[0] for soinfo in sotimes[n_ops_to_print:]),\n sum(soinfo[1] for soinfo in sotimes[n_ops_to_print:])))\n\n print('(*) Op is running a c implementation')\n\n # The summary per op\n op_flops = {}\n for a, t in iteritems(op_time):\n if hasattr(a, 'flops'):\n op_flops[a] = a.flops * op_call[a] / t / 1e6\n flops_msg = ''\n if op_flops:\n flops_msg = ' <MFlops/s>'\n print(\"\\nHACK WARNING: we print the flops for some OP, but the \"\n \"logic doesn't always work. You need to know the \"\n \"internals of Theano to make it work correctly. \"\n \"Otherwise don't use it!\")\n print()\n print('Op-wise summary:')\n print('<%% of local_time spent on this kind of Op> <cumulative %%> '\n '<self seconds> <cumulative seconds> <time per call> [*] %s '\n '<nb_call> <nb apply> <Op name>' % (flops_msg))\n\n otimes = [(t * 100 / local_time, t, a, op_cimpl.get(a, 0),\n op_call.get(a, 0), op_apply.get(a, 0))\n for a, t in iteritems(op_time)]\n otimes.sort()\n otimes.reverse()\n tot = 0\n for f, t, a, ci, nb_call, nb_apply in otimes[:n_ops_to_print]:\n if nb_call == 0:\n assert t == 0\n continue\n tot += t\n ftot = tot * 100 / local_time\n if ci:\n msg = '*'\n else:\n msg = ' '\n if op_flops:\n print(' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %7.1f '\n '%5d %2d %s' % (f, ftot, t, tot, t / nb_call, msg,\n op_flops.get(a, -1), nb_call, nb_apply,\n a))\n else:\n print(' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d '\n '%s' % (f, ftot, t, tot, t / nb_call, msg, nb_call,\n nb_apply, a))\n print(' ... 
(remaining %i Op account for %6.2f%%(%.2fs) of the '\n 'runtime)' %\n (max(0, len(otimes) - n_ops_to_print),\n sum(f for f, t, a, ci, nb_call, nb_op in\n otimes[n_ops_to_print:]),\n sum(t for f, t, a, ci, nb_call, nb_op in\n otimes[n_ops_to_print:])))\n print('(*) Op is running a c implementation')\n\n if print_apply:\n print()\n print('Apply-wise summary:')\n print('<% of local_time spent at this position> <cumulative %%> '\n '<apply time> <cumulative seconds> <time per call> [*] '\n '<nb_call> <Apply position> <Apply Op name>')\n atimes = [(t * 100 / local_time, t, a,\n [v for k, v in iteritems(fct_call)\n if k.maker.fgraph is a[1].fgraph][0])\n for a, t in iteritems(apply_time)]\n atimes.sort()\n atimes.reverse()\n tot = 0\n for f, t, a, nb_call in atimes[:n_apply_to_print]:\n tot += t\n ftot = tot * 100 / local_time\n if nb_call == 0:\n continue\n if apply_cimpl.get(a[1], False):\n msg = '*'\n else:\n msg = ' '\n print(' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %i '\n '%2i %s' %\n (f, ftot, t, tot, t / nb_call, msg, nb_call, a[0],\n str(a[1])))\n print(' ... (remaining %i Apply instances account for '\n '%.2f%%(%.2fs) of the runtime)' %\n (max(0, len(atimes) - n_apply_to_print),\n sum(f for f, t, a, nb_call in atimes[n_apply_to_print:]),\n sum(t for f, t, a, nb_call in atimes[n_apply_to_print:])))\n print('(*) Op is running a c implementation')\n for printer in profiler_printers:\n printer(fct_name, compile_time, fct_call_time, fct_call,\n apply_time, apply_cimpl, message, variable_shape,\n other_time)\n\n if not variable_shape:\n print(\"\\nProfile of Theano intermediate memory disabled. \"\n \"To enable, set the Theano flag ProfileMode.profile_memory \"\n \"to True.\")\n else:\n print(\"\"\"\n The memory profile in ProfileMode is removed!\n Use the new profiler. 
Use the Theano flags\n profile=True,profile_memory=True to enable it.\"\"\")\n\n print()\n print(\"\"\"Here are tips to potentially make your code run faster\n(if you think of new ones, suggest them on the mailing list).\nTest them first, as they are not guaranteed to always provide a speedup.\"\"\")\n from theano import tensor as T\n from theano.tensor.raw_random import RandomFunction\n import theano\n import theano.scalar as scal\n scalar_op_amdlibm_no_speed_up = [scal.LT, scal.GT, scal.LE, scal.GE,\n scal.EQ, scal.NEQ, scal.InRange,\n scal.Switch, scal.OR, scal.XOR,\n scal.AND, scal.Invert, scal.Maximum,\n scal.Minimum, scal.Add, scal.Mul,\n scal.Sub, scal.TrueDiv, scal.IntDiv,\n scal.Clip, scal.Second, scal.Identity,\n scal.Cast, scal.Sgn, scal.Neg,\n scal.Inv, scal.Sqr]\n scalar_op_amdlibm_speed_up = [scal.Mod, scal.Pow, scal.Ceil,\n scal.Floor, scal.RoundHalfToEven,\n scal.RoundHalfAwayFromZero, scal.Log,\n scal.Log2, scal.Log10, scal.Log1p,\n scal.Exp, scal.Sqrt, scal.Abs, scal.Cos,\n scal.Sin, scal.Tan, scal.Tanh,\n scal.Cosh, scal.Sinh,\n T.nnet.sigm.ScalarSigmoid,\n T.nnet.sigm.ScalarSoftplus]\n\n def get_scalar_ops(s):\n if isinstance(s, theano.scalar.Composite):\n l = []\n for node in s.fgraph.toposort():\n l += get_scalar_ops(node.op)\n return l\n else:\n return [s]\n\n def list_scalar_op(op):\n if isinstance(op.scalar_op, theano.scalar.Composite):\n return get_scalar_ops(op.scalar_op)\n else:\n return [op.scalar_op]\n\n def amdlibm_speed_up(op):\n if not isinstance(op, T.Elemwise):\n return False\n else:\n l = list_scalar_op(op)\n for s_op in l:\n if s_op.__class__ in scalar_op_amdlibm_speed_up:\n return True\n elif s_op.__class__ not in scalar_op_amdlibm_no_speed_up:\n print(\"We don't know if amdlibm will accelerate \"\n \"this scalar op.\", s_op)\n return False\n\n def exp_float32_op(op):\n if not isinstance(op, T.Elemwise):\n return False\n else:\n l = list_scalar_op(op)\n return any([s_op.__class__ in [scal.Exp] for s_op in l])\n\n printed_tip = False\n # tip 1\n if config.floatX == 'float64':\n print(\" - Try the Theano flag floatX=float32\")\n printed_tip = True\n\n # tip 2\n if not config.lib.amdlibm and any([amdlibm_speed_up(a.op) for i, a\n in apply_time]):\n print(\" - Try installing amdlibm and set the Theano flag \"\n \"lib.amdlibm=True. This speeds up only some Elemwise \"\n \"operation.\")\n printed_tip = True\n\n # tip 3\n if not config.lib.amdlibm and any([exp_float32_op(a.op) and\n a.inputs[0].dtype == 'float32'\n for i, a in apply_time]):\n print(\" - With the default gcc libm, exp in float32 is slower \"\n \"than in float64! Try Theano flag floatX=float64, or \"\n \"install amdlibm and set the theano flags lib.amdlibm=True\")\n printed_tip = True\n\n # tip 4\n for a, t in iteritems(apply_time):\n node = a[1]\n if (isinstance(node.op, T.Dot) and\n all([len(i.type.broadcastable) == 2\n for i in node.inputs])):\n print(\" - You have a dot operation that was not optimized to\"\n \" dot22 (which is faster). Make sure the inputs are \"\n \"float32 or float64, and are the same for both inputs. \"\n \"Currently they are: %s\" %\n [i.type for i in node.inputs])\n printed_tip = True\n\n # tip 5\n for a, t in iteritems(apply_time):\n node = a[1]\n if isinstance(node.op, RandomFunction):\n printed_tip = True\n print(\" - Replace the default random number generator by \"\n \"'from theano.sandbox.rng_mrg import MRG_RandomStreams \"\n \"as RandomStreams', as this is is faster. 
It is still \"\n \"experimental, but seems to work correctly.\")\n if config.device.startswith(\"gpu\"):\n print(\" - MRG_RandomStreams is the only random number\"\n \" generator supported on the GPU.\")\n break\n\n if not printed_tip:\n print(\" Sorry, no tip for today.\")", "def print_performance_info(self):\n pass", "def show(self,verbose=0):\n print 'inferenceArgs',self.ws.inferenceArgs\n print 'inferenceExpr',theano.pp(self.ws.inferenceExpr)\n if verbose>=1:\n print 'debugprint inferenceExpr:'\n theano.printing.debugprint(self.ws.inferenceExpr)\n if self.ws.dataLossExpr:\n print 'dataLossArgs',self.ws.dataLossArgs\n print 'dataLossExpr',theano.pp(self.ws.dataLossExpr)\n print 'debugprint dataLossExpr:'\n theano.printing.debugprint(self.ws.dataLossExpr)", "def print_summary(self, **kwargs):\r\n compile_time = sum([ps.compile_time for ps\r\n in self.profile_stats.values()])\r\n\r\n fct_call = dict([(fn, ps.fct_callcount)\r\n for (fn, ps) in self.profile_stats.items()])\r\n\r\n fct_call_time = dict([(fn, ps.fct_call_time)\r\n for (fn, ps) in self.profile_stats.items()])\r\n\r\n apply_time = {}\r\n for fn, ps in self.profile_stats.items():\r\n for (i, node) in enumerate(fn.maker.fgraph.toposort()):\r\n apply_time[(i, node)] = ps.apply_time[node]\r\n for (i, n), t in apply_time.items():\r\n if t == 0:\r\n print i, n\r\n\r\n apply_cimpl = {}\r\n for fn, ps in self.profile_stats.items():\r\n apply_cimpl.update(ps.apply_cimpl)\r\n\r\n message = self.message\r\n\r\n variable_shape = {}\r\n for fn, ps in self.profile_stats.items():\r\n variable_shape.update(ps.variable_shape)\r\n\r\n other_time = dict(\r\n linker_time=sum(\r\n [ps.linker_time for ps in self.profile_stats.values()]),\r\n optimizer_time=sum(\r\n [ps.optimizer_time for ps in self.profile_stats.values()]))\r\n\r\n self.print_summary_(\"print_summary\",\r\n compile_time, fct_call_time, fct_call,\r\n apply_time, apply_cimpl, message, variable_shape,\r\n self.local_time, other_time,\r\n **kwargs)", "def print_summary(self, **kwargs):\n compile_time = sum([ps.compile_time for ps\n in self.profile_stats.values()])\n\n fct_call = dict([(fn, ps.fct_callcount)\n for (fn, ps) in iteritems(self.profile_stats)])\n\n fct_call_time = dict([(fn, ps.fct_call_time)\n for (fn, ps) in iteritems(self.profile_stats)])\n\n apply_time = {}\n for fn, ps in iteritems(self.profile_stats):\n for (i, node) in enumerate(fn.maker.fgraph.toposort()):\n apply_time[(i, node)] = ps.apply_time[node]\n for (i, n), t in iteritems(apply_time):\n if t == 0:\n print(i, n)\n\n apply_cimpl = {}\n for ps in itervalues(self.profile_stats):\n apply_cimpl.update(ps.apply_cimpl)\n\n message = self.message\n\n variable_shape = {}\n for ps in itervalues(self.profile_stats):\n variable_shape.update(ps.variable_shape)\n\n other_time = dict(\n linker_time=sum(\n [ps.linker_time for ps in self.profile_stats.values()]),\n optimizer_time=sum(\n [ps.optimizer_time for ps in self.profile_stats.values()]))\n\n self.print_summary_(\"print_summary\",\n compile_time, fct_call_time, fct_call,\n apply_time, apply_cimpl, message, variable_shape,\n self.local_time, other_time,\n **kwargs)", "def print_stats(self):\n if self.n_iter % 5 != 0:\n return\n\n s_iter = \"%7i - \" % self.n_iter\n s_stat = ' || '.join([\n '{}: {:7.4f}'.format(k, np.mean(v)) for k, v in self.stats.items()\n if type(v) is list and len(v) > 0\n ])\n for k in self.stats.keys():\n if type(self.stats[k]) is list:\n del self.stats[k][:]\n\n # transformer learning rate\n # learning rates\n s_lr = \" - \"\n for k, v in 
self.optimizers.items():\n s_lr = s_lr + (\" - %s LR: \" % k) + \" / \".join(\n \"{:.4e}\".format(group['lr']) for group in v.param_groups)\n\n # processing speed\n new_time = time.time()\n diff = new_time - self.last_time\n s_speed = \"{:7.2f} sent/s - {:8.2f} words/s - \".format(\n self.stats['processed_s'] * 1.0 / diff,\n self.stats['processed_w'] * 1.0 / diff\n )\n self.stats['processed_s'] = 0\n self.stats['processed_w'] = 0\n self.last_time = new_time\n\n # log speed + stats + learning rate\n logger.info(s_iter + s_speed + s_stat + s_lr)", "def _compute_statistics(self):\n # log to file\n output_dir = self.params['output_dir']\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n output_dir = os.path.join(output_dir, 'statistics.log')\n log = logging.getLogger('tensorflow')\n handle = logging.FileHandler(output_dir)\n log.addHandler(handle)\n\n # FLOPS\n encoder_flops, decoder_flops = 0, 0\n encoder_count, decoder_count = 0, 0\n graph = tf.get_default_graph()\n for operation in graph.get_operations():\n flops = ops.get_stats_for_node_def(graph, operation.node_def,\n 'flops').value\n if flops is None:\n continue\n if operation.name.startswith('model/encoder'):\n # encoder\n encoder_flops += flops\n encoder_count += 1\n tf.logging.info('encoder operation %s : %d', operation.name, flops)\n elif operation.name.startswith('model/decoder'):\n # decoder\n decoder_flops += flops\n decoder_count += 1\n tf.logging.info('decoder operation %s : %d', operation.name, flops)\n else:\n # gradient\n pass\n tf.logging.info('flops of %d encoder tensor: %d',\n encoder_count, encoder_flops)\n tf.logging.info('flops of %d decoder tensor: %d',\n decoder_count, decoder_flops)\n tf.logging.info('flops of total %d tensor: %d',\n encoder_count + decoder_count,\n encoder_flops + decoder_flops)\n # parameters\n encoder_parameters, decoder_parameters = 0, 0\n encoder_count, decoder_count = 0, 0\n for var in tf.trainable_variables():\n parameters = np.prod(var.get_shape().as_list())\n if var.name.startswith('model/encoder'):\n # encoder\n encoder_parameters += parameters\n encoder_count += 1\n tf.logging.info('encoder variable %s : %d', var.name, parameters)\n elif var.name.startswith('model/decoder'):\n # decoder\n decoder_parameters += parameters\n decoder_count += 1\n tf.logging.info('decoder variable %s : %d', var.name, parameters)\n\n tf.logging.info('parameters of %d encoder tensor: %d',\n encoder_count, encoder_parameters)\n tf.logging.info('parameters of %d decoder tensor: %d',\n decoder_count, decoder_parameters)\n tf.logging.info('parameters of total %d tensor: %d',\n encoder_count + decoder_count,\n encoder_parameters + decoder_parameters)\n # disable log to file\n log.removeHandler(handle)", "def print_layers(model):\r\n for i in range(len(model.layers)):\r\n print(\"Printing layer shape: %d\" % i, model.layers[i])\r\n weights = model.layers[i].get_weights()\r\n for weight in weights: # Layer type\r\n print(weight.shape)", "def dump(self):\n fmt='%20s:%10.4fs%6.1f%%'\n print('\\n----------------TIME MANAGER PROFILE----------------\\n\\n')\n total_t=time.time()-self.tic0\n for rec in self.record:\n print(fmt % (rec[0],rec[1],100.0*rec[1]/total_t))\n print(fmt % ('TOTAL ELAPSED TIME', total_t, 100.0))\n print('\\n----------------TIME MANAGER PROFILE----------------\\n\\n')", "def _print_info(self,\n step,\n loss,\n t_cost):\n format_str = ('{0}: step {1}, loss = {2:.2f}' \\\n '({3:.3f} sec/batch)')\n print format_str.format(datetime.now(), \\\n step, loss, t_cost)", "def 
record_inference_stats(self, nms_step_time: float, inference_round_trip_time: Tuple[float, float, float], inference_step_time: float):\n\n # inference_round_trip_time is an average time needed for a step\n self.inference_times.append(inference_round_trip_time)\n # inference_step_time is the time taken to complete the step, and used to calculate the throughput\n inference_throughput = self.image_count/inference_step_time\n self.inference_throughputs.append(inference_throughput)\n\n self.nms_times.append(nms_step_time)\n\n total_step_time = inference_step_time + nms_step_time\n self.total_times.append(total_step_time)\n\n total_throughput = self.image_count/total_step_time\n self.total_throughputs.append(total_throughput)", "def print_layers(network, y_in):\r\n layer_features=get_layer_activations(network,y_in)\r\n for idx,feature in enumerate(layer_features):\r\n s=np.shape(feature)\r\n print(\"Layer \"+str(idx)+\": \"+str(s[1]*s[2]*s[3])+\" neurons / \", s)", "def print_func_measuremetns():\n print(\"Measured functions:\")\n for func in measured_funcs:\n fn = func.func_name\n tr = func.total_runtime\n tc = func.total_calls\n tpc = 'N/A' if tc == 0 else \"{:10.10f}\".format(tr / tc)\n line = \"{:>30}: {:10.8f}s over {:10d} calls ({} per call)\".format(fn, tr, tc, tpc)\n print(line)", "def display_log(self, **kwargs):\n stop_dict = kwargs[\"stop_dict\"]\n if stop_dict['stop'] == 1:\n print(\n \"{} algorithm -- Iteration {} over {} -- Elapsed time {} s -- Converged\".format(self.name,\n kwargs[\"iteration\"],\n self.nb_iter,\n kwargs[\"elapsed_time\"]))\n else:\n print(\n '{} algorithm -- Iteration {} over {} -- Elapsed time {} s -- Relative improvement {} %'.format(\n self.name,\n kwargs[\"iteration\"],\n self.nb_iter,\n kwargs[\"elapsed_time\"],\n 100 * stop_dict['relative_improvement']))", "def print_train_time(start: float, end: float, device: torch.device = None):\n total_time = end - start\n print(f\"Train time on {device}: {total_time:.3f} seconds\")\n return total_time", "def test_pydotprint_profile():\r\n\r\n # Skip test if pydot is not available.\r\n if not theano.printing.pydot_imported:\r\n raise SkipTest('pydot not available')\r\n\r\n A = tensor.matrix()\r\n f = theano.function([A], A + 1, mode='ProfileMode')\r\n theano.printing.pydotprint(f, print_output_file=False)", "def _disp(self, t_elapsed):\n disp_str = \"Epoch: %4d/%4d | Duration: %6.2f secs\" % \\\n (self.iteration, self.Nepochs, t_elapsed) \n disp_str += \" | Objective: %4e\" % self.of_list[-1]\n if self.disp_p:\n disp_str += \" | Parameters: %s\" % self.params\n print(disp_str)", "def runtime_print(f):\n def decorated_fun(*args, **kwargs):\n t0 = datetime.now()\n ret = f(*args, **kwargs)\n t1 = datetime.now()\n print(f'Runtime: {t1 - t0}')\n return ret\n\n return decorated_fun", "def print_layer_io_shapes(model):\n for i, _ in enumerate(model.layers):\n print(\"layer {} input: \".format(i), model.layers[i].input_shape)\n print(\"layer {} output:\".format(i), model.layers[i].output_shape)", "def print_statistics(session, batch_image, batch_label, cost, accuracy, type=\"VALIDATION\"):\n loss = session.run(\n cost,\n feed_dict={\n x: batch_image,\n y: batch_label,\n keep_prob: 1.0\n }\n )\n accuracy = session.run(\n accuracy,\n feed_dict={\n x: batch_image,\n y: batch_label,\n keep_prob: 1.0\n }\n )\n print(\"{} :: Loss = {} ; Accuracy = {} \".format(type, loss, accuracy))\n return loss, accuracy", "def long_print(self, file=sys.stderr, fct_name=None, message=None,\r\n n_apply_to_print=15, n_ops_to_print=20, 
print_apply=False):\r\n local_time = sum(self.apply_time.values())\r\n\r\n print ''\r\n print 'ProfileMode.long_print()'\r\n print 'name = %s' % fct_name\r\n print 'msg = %s' % message\r\n print '---------------------------'\r\n print ''\r\n\r\n print 'Total time spent running thunks: %.3fs' % local_time\r\n\r\n sop_time = {}\r\n sop_call = {}\r\n sop_op = {}\r\n #map each op class to Bool. True iff all applies were done in c.\r\n sop_c = {}\r\n for a, t in op_time.items():\r\n typ = type(a)\r\n sop_time.setdefault(typ, 0)\r\n sop_time[typ] += t\r\n sop_op.setdefault(typ, 0)\r\n sop_op[typ] += 1\r\n sop_c.setdefault(typ, True)\r\n sop_c[typ] = sop_c[typ] and op_cimpl.get(a, False)\r\n sop_call[typ] = sop_call.get(typ, 0) + op_call[a]\r\n print '\\nSingle Op-wise summary: <% of local_time spent on this kind of Op> <cumulative %%> <self seconds> <cumulative seconds> <time per call> <nb_call> <nb_op> <nb_op> <Op name>'\r\n sotimes = [(t * 100 / local_time, t, a, sop_c[a],\r\n sop_call[a], sop_op[a]) for a, t in sop_time.items()]\r\n sotimes.sort()\r\n sotimes.reverse()\r\n tot = 0\r\n for f, t, a, ci, nb_call, nb_op in sotimes[:n_ops_to_print]:\r\n if nb_call == 0:\r\n assert t == 0\r\n continue\r\n tot += t\r\n ftot = tot * 100 / local_time\r\n if ci:\r\n msg = '*'\r\n else:\r\n msg = ' '\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d %s' % (f, ftot, t, tot, t/nb_call, msg, nb_call, nb_op, a)\r\n print ' ... (remaining %i Ops account for %.2f%%(%.2fs) of the runtime)'\\\r\n % (max(0, len(sotimes) - n_ops_to_print),\r\n sum(f for f, t, a, ci, nb_call, nb_op in\r\n sotimes[n_ops_to_print:]),\r\n sum(t for f, t, a, ci, nb_call, nb_op in\r\n sotimes[n_ops_to_print:]))\r\n\r\n total_time = time.time() - import_time\r\n total_fct_time = sum(fct_call_time.values())\r\n total_fct_call = sum(fct_call.values())\r\n other_time = total_time - total_fct_time - compile_time\r\n print\r\n print 'Theano fct summary: <% total fct time> <total time> <time per call> <nb call> <fct name>'\r\n for key in fct_call.keys():\r\n if fct_call[key] > 0:\r\n print ' %4.1f%% %.3fs %.2es %d %s'%(\r\n fct_call_time[key] / total_fct_time * 100,\r\n fct_call_time[key],\r\n fct_call_time[key] / fct_call[key],\r\n fct_call[key], key.name)\r\n else:\r\n print ' NOT CALLED',key.name\r\n\r\n if total_fct_time > 0:\r\n time_pr_in_fct = local_time / total_fct_time * 100\r\n time_per_call = total_fct_time / total_fct_call\r\n else:\r\n time_pr_in_fct = 0\r\n time_per_call = 0\r\n\r\n print\r\n print 'Time since import %.3fs' % (total_time)\r\n print 'Compile time: %.3fs %.1f%%' % (compile_time,\r\n compile_time / total_time * 100)\r\n print 'Theano fct call %.3fs %.1f%%' % (total_fct_time,\r\n total_fct_time / total_time *\r\n 100)\r\n print (' Theano Op time (included in fct call, Time spent '\r\n 'running thunks) %.3fs %.1f%%(of total) %.1f%%(of fct call)' %\r\n (local_time, local_time / total_time * 100, time_pr_in_fct))\r\n print 'Other time since import %.3fs %.1f%%'%(other_time,other_time/total_time*100)\r\n print '%i Theano fct call, %.3fs per call'%(total_fct_call, time_per_call)\r\n\r\n print\r\n print \"List of apply that don't have float64 as input but have float64 in outputs. 
Usefull to know if we forgot some cast when using floatX=float32 or gpu code.\"\r\n print '<Apply> <Apply position> <fct name> <inputs type> <outputs type>'\r\n for fct in fct_call.keys():\r\n for idx, node in enumerate(fct.maker.fgraph.toposort()):\r\n if any(hasattr(i, 'dtype') and i.dtype == 'float64' for i in node.outputs) and not any(hasattr(i, 'dtype') and i.dtype == 'float64' for i in node.inputs):\r\n print str(node), idx, fct.name, str([getattr(i,'dtype',None) for i in node.inputs]),str([getattr(i,'dtype',None) for i in node.outputs])\r\n\r\n if any([x[2].__name__.startswith(\"Gpu\") for x in sotimes]):\r\n cpu = []\r\n gpu = []\r\n trans = []\r\n for so in sotimes:\r\n if so[2].__name__ in [\"HostFromGpu\", \"GpuFromHost\"]:\r\n trans.append(so)\r\n elif so[2].__name__.startswith(\"Gpu\"):\r\n gpu.append(so)\r\n else:\r\n cpu.append(so)\r\n sum_cpu = sum(so[1] for so in cpu)\r\n sum_gpu = sum(so[1] for so in gpu)\r\n sum_trans = sum(so[1] for so in trans)\r\n print\r\n\r\n print \"Spent %.3fs(%.3f%%) in cpu Op, %.3fs(%.3f%%) in gpu Op and %.3fs(%.3f%%) transfert Op\"%(\r\n sum_cpu, sum_cpu/local_time*100, sum_gpu, sum_gpu/local_time*100, sum_trans, sum_trans/local_time*100)\r\n\r\n print \"Theano function input that are float64\"\r\n print \"<fct name> <input name> <input type> <str input>\"\r\n for fct in fct_call.keys():\r\n for i in fct.input_storage:\r\n if hasattr(i.type, 'dtype') and i.type.dtype == 'float64':\r\n print fct.name, i.name, i.type, i\r\n\r\n print\r\n print \"Here are tips to potentially make your code run faster (if you think of new ones, suggest them on the mailing list). Test them first as they are not guaranteed to always provide a speedup.\"\r\n from theano import tensor as T\r\n from theano.tensor.raw_random import RandomFunction\r\n import theano\r\n import theano.scalar as scal\r\n scalar_op_amdlibm_no_speed_up = [scal.LT, scal.GT, scal.LE, scal.GE, scal.EQ, scal.NEQ, scal.InRange, scal.Switch, scal.OR, scal.XOR, scal.AND, scal.Invert, scal.Maximum, scal.Minimum, scal.Add, scal.Mul, scal.Sub, scal.TrueDiv, scal.IntDiv, scal.Clip, scal.First, scal.Second, scal.Identity, scal.Cast, scal.Sgn, scal.Neg, scal.Inv, scal.Sqr ]\r\n scalar_op_amdlibm_speed_up = [scal.Mod, scal.Pow, scal.Ceil, scal.Floor, scal.RoundHalfToEven, scal.RoundHalfAwayFromZero, scal.Log, scal.Log2, scal.Log10, scal.Log1p, scal.Exp, scal.Sqrt, scal.Abs, scal.Cos, scal.Sin, scal.Tan, scal.Tanh, scal.Cosh, scal.Sinh, T.nnet.sigm.ScalarSigmoid, T.nnet.sigm.ScalarSoftplus ]#Abs, Mod in float{32,64} only\r\n\r\n def get_scalar_ops(s):\r\n if isinstance(s, theano.scalar.Composite):\r\n l = []\r\n for node in s.fgraph.toposort():\r\n l+=get_scalar_ops(node.op)\r\n return l\r\n else: return [s]\r\n def list_scalar_op(op):\r\n if isinstance(op.scalar_op, theano.scalar.Composite):\r\n return get_scalar_ops(op.scalar_op)\r\n else: return [op.scalar_op]\r\n\r\n def amdlibm_speed_up(op):\r\n if not isinstance(op, T.Elemwise):\r\n return False\r\n else:\r\n l = list_scalar_op(op)\r\n for s_op in l:\r\n if s_op.__class__ in scalar_op_amdlibm_speed_up:\r\n return True\r\n elif s_op.__class__ not in scalar_op_amdlibm_no_speed_up:\r\n print \"We don't know if amdlibm will accelerate this scalar op.\", s_op\r\n return False\r\n def exp_float32_op(op):\r\n if not isinstance(op, T.Elemwise):\r\n return False\r\n else:\r\n l = list_scalar_op(op)\r\n return any([s_op.__class__ in [scal.Exp] for s_op in l])\r\n\r\n #tip 1\r\n if config.floatX=='float64':\r\n print \" - Try the Theano flag 
floatX=float32\"\r\n\r\n #tip 2\r\n if not config.lib.amdlibm and any([amdlibm_speed_up(a.op) for i,a in apply_time]):\r\n print \" - Try installing amdlibm and set the Theano flag lib.amdlibm=True. This speed up only some Elemwise operation.\"\r\n\r\n #tip 3\r\n if not config.lib.amdlibm and any([exp_float32_op(a.op) and a.inputs[0].dtype=='float32' for i,a in apply_time]):\r\n print \" - With the default gcc libm, exp in float32 is slower than in float64! Try Theano flags floatX=float64 or install amdlibm and set the theano flags lib.amdlibm=True\"\r\n\r\n #tip 4\r\n for a, t in apply_time.iteritems():\r\n node = a\r\n if (isinstance(node.op, T.Dot) and\r\n all([len(i.type.broadcastable) == 2 for i in node.inputs])):\r\n print (\" - You have a dot operation that was not optimized \"\r\n \"to dot22 that is faster. Make sure the inputs are \"\r\n \"float32 or float64 and are the same for both inputs. \"\r\n \"Currently they are: %s\" %\r\n [i.type for i in node.inputs])\r\n\r\n #tip 5\r\n for a, t in apply_time.iteritems():\r\n node = a\r\n if isinstance(node.op, RandomFunction):\r\n print (\" - Replace the default random number generator by \"\r\n \"'from theano.sandbox.rng_mrg import MRG_RandomStreams \"\r\n \"as RandomStreams' as this is is faster. It is still \"\r\n \"experimental, but seams to work correctly.\")\r\n if config.device.startswith(\"gpu\"):\r\n print (\" - MRG_RandomStreams is the only random number\"\r\n \" supported on the GPU.\")\r\n break", "def print_train_time(start, end, device=None):\n total_time = end - start\n print(f\"\\nTrain time on {device}: {total_time:.3f} seconds\")\n return total_time", "def print_speed_and_accuracy(models = [\"decisiontree\", \"gradienttreeboosting\", \"knn\", \"logisticregression\", \"randomforest\"]):\n\ttest_data = joblib.load(\"data/test_data\")\n\ttest_target = joblib.load(\"data/test_target\")\n\n\tfor model in models:\n\t\tmodel = load_model(model)\n\t\taccuracy = model.score(test_data, test_target)\n\t\tstart = time.time()\n\t\tfor i in range(1000):\n\t\t\tmodel.predict_proba([test_data[i]])\n\t\tend = time.time()\n\t\tduration = end - start\n\t\tprint(model, accuracy, duration)", "def printModelAndTime(self):\n import time\n self._reporter.writeOutput(\"Model name = \" + self.modelName + '\\n' +\n \"Output directory = \" + self._outputDir_ + '\\n' +\n \"Time = \" + time.asctime() + '\\n')\n return" ]
[ "0.68548775", "0.6575847", "0.6525138", "0.64967567", "0.6452878", "0.6437493", "0.63883066", "0.6276323", "0.6178201", "0.613917", "0.60818535", "0.6019198", "0.59552115", "0.5952985", "0.5936522", "0.5865066", "0.5844863", "0.5840789", "0.5832643", "0.58007747", "0.5759451", "0.57562476", "0.57495", "0.57309955", "0.570664", "0.56982523", "0.5694974", "0.5671473", "0.5646463", "0.5622423" ]
0.6702151
1
Here is where we receive streamed images from the Kafka Server and convert them to a Flask-readable format.
def getCamera1():
    for msg in camera1:
        yield (b'--frame\r\n'
               b'Content-Type: image/jpg\r\n\r\n' + base64.b64decode(msg.value['image_bytes']) + b'\r\n\r\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen(self):\n\n # context = zmq.Context()\n # receiver = context.socket(zmq.PULL)\n self.receiver.connect(inference_url())\n\n while self.is_opened:\n ret = self.receiver.recv_pyobj()\n\n nparr = np.frombuffer(np.array(ret['data']), np.uint8)\n\n # logger.warning('Receive: %s', ret['ts'])\n # logger.warning('Time elapsed: %s', (time.time()-self.keep_alive))\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # ret2 = receiver.recv_pyobj()\n # logger.warning(ret2['ts'])\n # logger.warning(ret2['shape'])\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n cv2.imencode('.jpg', img)[1].tobytes() + b'\\r\\n')\n self.receiver.close()", "def getCamera2():\n for msg in camera2:\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpg\\r\\n\\r\\n' + base64.b64decode(msg.value['image_bytes']) + b'\\r\\n\\r\\n')", "def gen_livestream():\n\n flag = True\n frame = _dog()\n while True:\n time.sleep(0.02)\n if app.images.qsize():\n image = app.images.get()\n if flag:\n image = base64_to_cv2(image)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n detector = dlib.get_frontal_face_detector()\n rects = detector(gray, 0)\n for (i, rect) in enumerate(rects):\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n \n for (x, y) in shape:\n cv2.circle(image, (x, y), 2, (0, 255, 0), -1)\n _, frame = cv2.imencode('.jpg', image)\n else:\n frame = _dog()\n # print(position)\n flag = not flag\n # yield ('Content-Type: image/jpeg\\r\\n\\r\\n' + base64.b64encode(frame).decode(\"utf-8\") + '\\r\\n')\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def video_test():\n r = request\n # convert string of image data to uint8\n nparr = np.fromstring(r.data, np.uint8)\n # decode image\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # do some fancy processing here....\n\n # build a response dict to send back to client\n response = {'message': 'image received. 
size={}x{}'.format(img.shape[1], img.shape[0])\n }\n print(response)\n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n cv2.imwrite(\"1.jpg\", img)\n print(\"done\")\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n self.ID = self.ID + 1\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n name = str(f\"{self.ID:02d}\"+\"_single.jpg\")\n cv2.imwrite(os.path.join(self.args.path_in, name), image)\n\n if (self.ID == 5):\n # Run SyntheticDataGeneration\n self.synthetic.eval()\n self.ID = 0\n # Annotate image and publish results\n current_directory_path = os.path.join(self.args.save_path, str(\"/Documents_orig/\"))\n for file in os.listdir(current_directory_path):\n name, ext = os.path.splitext(file)\n if ext == \".jpg\":\n image_file_savepath = os.path.join(current_directory_path, file)\n cv_image = cv2.imread(image_file_savepath)\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)\n if self.image_publisher is not None:\n image = Image(np.array(cv_image, dtype=np.uint8))\n message = self.bridge.to_ros_image(image, encoding=\"bgr8\")\n self.image_publisher.publish(message)\n for f in os.listdir(self.args.path_in):\n os.remove(os.path.join(self.args.path_in, f))", "def infer():\n\n # Create StreamManagerApi object\n stream_manager_api = StreamManagerApi()\n # Use InitManager method init StreamManagerApi\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(\"Failed to init Stream manager, ret=%s\" % str(ret))\n exit()\n\n # create streams by pipeline config file\n with open(args.pipeline_path, \"rb\") as f:\n pipeline_str = f.read()\n\n # Configuring a stream\n ret = stream_manager_api.CreateMultipleStreams(pipeline_str)\n if ret != 0:\n print(\"Failed to create Stream, ret=%s\" % str(ret))\n exit()\n\n # Construct the input of the stream\n data_input = MxDataInput()\n # Stream_name encoded in UTF-8\n stream_name = args.stream_name.encode()\n print(stream_name)\n predictions = []\n with open(args.label_path, 'rt') as f:\n val_cls = f.read().rstrip(\"\\n\").split(\"\\n\")\n val_cls_dict = {}\n for i, cls in enumerate(val_cls):\n val_cls_dict[i] = cls\n coco_gt = COCO(args.instances_path)\n classs_dict = {}\n cat_ids = coco_gt.loadCats(coco_gt.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"name\"]] = cat[\"id\"]\n\n for file_name in os.listdir(args.img_path):\n pred_data = []\n # Gets the Address of each image\n img_id = int(file_name.split('.')[0])\n file_path = args.img_path + file_name\n size = (cv2.imread(file_path)).shape\n\n # Read each photo in turn\n with open(file_path, \"rb\") as f:\n img_data = f.read()\n if not img_data:\n print(f\"read empty data from img:{file_name}\")\n continue\n # The element value img_data\n data_input.data = img_data\n boxes_output, scores_output = send_data_get_output(stream_name, data_input, stream_manager_api)\n pred_data.append({\"boxes\": boxes_output,\n \"box_scores\": scores_output,\n \"img_id\": img_id,\n \"image_shape\": size})\n\n parse_img_infer_result(pred_data[0], predictions, val_cls_dict, classs_dict)\n print(f\"Inferred image:{file_name} success!\")\n\n # Save the result in JSON format\n if not os.path.exists(args.res_path):\n os.makedirs(args.res_path)\n with open(args.res_path + 'predictions_test.json', 'w') as f:\n json.dump(predictions, f)\n stream_manager_api.DestroyAllStreams()", "def process_images():\n 
for message in get_messages_from_sqs():\n try:\n message_content = json.loads(message.body)\n image = unquote_plus(message_content\n ['Records'][0]['s3']['object']\n ['key'])\n s3.download_file(input_bucket_name, image, image)\n process_json(image)\n upload_image(image)\n cleanup_files(image)\n except:\n message.change_visibility(VisibilityTimeout=0)\n continue\n else:\n message.delete()", "def getCamera3():\n for msg in camera3:\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpg\\r\\n\\r\\n' + base64.b64decode(msg.value['image_bytes']) + b'\\r\\n\\r\\n')", "def list_image_stream(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_image_stream\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/imagestreams'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ImageStreamList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def client_handler(inbound_socket, addr, job_queue, result_queue):\n global last_pic\n print(inbound_socket)\n\n def draw_boxes(boxes):\n mask = Image.new('RGBA', picSize, (255, 255, 255, 0))\n d = ImageDraw.Draw(mask)\n fnt = ImageFont.truetype(p.FONT_PATH, 12)\n txt_offset_x = 0\n txt_offset_y = 20\n for box in boxes:\n p_coords = [box.coords[0]*picSize[0],\n box.coords[1]*picSize[1],\n box.coords[2]*picSize[0],\n box.coords[3]*picSize[1]]\n d.rectangle(p_coords, outline='red')\n print('drawing box at ', end='')\n # print([x for x in box.coords])\n textpos = (p_coords[0] - txt_offset_x, p_coords[1] - txt_offset_y)\n d.text(textpos, 'Class %s at %s confidence' %\n (box.classification, box.confidence), font=fnt, fill='red')\n\n return mask\n try:\n camera_socket = socket.socket()\n camera_socket.connect(('dronepi.local', 8000))\n camera_connection = camera_socket.makefile('rwb')\n\n client_connection = inbound_socket.makefile('rwb')\n image_stream = io.BytesIO()\n char_len = struct.calcsize('<c')\n long_len = struct.calcsize('<L')\n while True:\n t = time.time()\n command = struct.unpack('<c', client_connection.read(char_len))[0]\n t = 
time_op(t, 'recv command')\n if command != b'':\n if command == b'p':\n last_pic.save(image_stream,\n format='jpeg',\n quality=85,\n thumbnail=None)\n t = time_op(t, 'save pic')\n header = struct.pack('<L', image_stream.tell())\n client_connection.write(header)\n t = time_op(t, 'send header')\n # Rewind the stream and send the image data over the wire\n image_stream.seek(0)\n client_connection.write(image_stream.read())\n client_connection.flush()\n t = time_op(t, 'send pic')\n # reset stream\n image_stream.seek(0)\n image_stream.truncate()\n\n elif command == b'c':\n camera_connection.write(b'p')\n camera_connection.flush()\n t = time_op(t, 'send cam request')\n image_len_raw = camera_connection.read(long_len)\n image_len = struct.unpack('<L', image_len_raw)[0]\n t = time_op(t, 'recv header')\n if not image_len:\n print('Received image length of 0, quitting!')\n break\n # Construct a stream to hold the image data and\n # read the image data from the connection\n image_stream.write(camera_connection.read(image_len))\n t = time_op(t, 'recv pic')\n # Rewind the stream, open it as an image with PIL and\n # do some processing on it\n image_stream.seek(0)\n image = Image.open(image_stream)\n\n t = time_op(t, 'open pic & process')\n job_queue.put(image)\n job_queue.join()\n t = time_op(t, 'NN')\n\n image_stream.seek(0)\n image_stream.truncate()\n\n bboxes = result_queue.get(False)\n box_pickle = pickle.dumps(bboxes, protocol=3)\n pickle_size = len(box_pickle)\n t = time_op(t, 'pickle')\n client_connection.write(struct.pack('<L', pickle_size))\n client_connection.write(box_pickle)\n client_connection.flush()\n t = time_op(t, 'send pickle')\n\n last_pic = image\n\n elif command == b'd':\n camera_connection.write(b'p')\n camera_connection.flush()\n t = time_op(t, 'send cam request')\n image_len_raw = camera_connection.read(long_len)\n image_len = struct.unpack('<L', image_len_raw)[0]\n t = time_op(t, 'recv header')\n if not image_len:\n print('Received image length of 0, quitting!')\n break\n # Construct a stream to hold the image data and read\n # the image data from the connection\n\n image_stream.write(camera_connection.read(image_len))\n t = time_op(t, 'recv pic')\n # Rewind the stream, open it as an image with PIL and\n # do some processing on it\n image_stream.seek(0)\n image = Image.open(image_stream)\n\n t = time_op(t, 'open pic & process')\n job_queue.put(image)\n job_queue.join()\n t = time_op(t, 'NN')\n\n image_stream.seek(0)\n image_stream.truncate()\n\n bboxes = result_queue.get(False)\n\n box_count = len(bboxes)\n client_connection.write(struct.pack('<L', box_count))\n for box in bboxes:\n data = [box.coords[0],\n box.coords[1],\n box.coords[2],\n box.coords[3],\n box.confidence,\n box.classification]\n #print(data)\n client_connection.write(struct.pack('<ffffff',\n data[0],\n data[1],\n data[2],\n data[3],\n data[4],\n data[5]))\n client_connection.flush()\n t = time_op(t, 'send tuples')\n\n last_pic = image\n except:\n print('Error: %s' % sys.exc_info()[0], flush=True)\n print('Error: %s' % sys.exc_info()[1], flush=True)\n print('Error: %s' % sys.exc_info()[2], flush=True)\n client_connection.close()\n camera_connection.close()\n inbound_socket.close()\n camera_socket.close()\n return 0", "def gen(camera):\n while livestreamOn == True:\n frame = camera.get_frame()\n img = cv2.imencode('.jpg', frame)[1]\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + img.tobytes() + b'\\r\\n')", "def _stream_raw_image_onto_device(self, image_info, device):\n starttime = 
time.time()\n total_retries = CONF.image_download_connection_retries\n for attempt in range(total_retries + 1):\n try:\n image_download = ImageDownload(image_info, time_obj=starttime)\n\n with open(device, 'wb+') as f:\n try:\n for chunk in image_download:\n f.write(chunk)\n except Exception as e:\n msg = ('Unable to write image to device {}. '\n 'Error: {}').format(device, str(e))\n raise errors.ImageDownloadError(image_info['id'], msg)\n except errors.ImageDownloadError as e:\n if attempt == CONF.image_download_connection_retries:\n raise\n else:\n LOG.warning('Image download failed, %(attempt)s of '\n '%(total)s: %(error)s',\n {'attempt': attempt,\n 'total': total_retries,\n 'error': e})\n time.sleep(CONF.image_download_connection_retry_interval)\n else:\n break\n\n totaltime = time.time() - starttime\n LOG.info(\"Image streamed onto device %(device)s in %(totaltime)s \"\n \"seconds for %(size)s bytes. Server originaly reported \"\n \"%(reported)s.\",\n {'device': device, 'totaltime': totaltime,\n 'size': image_download.bytes_transferred,\n 'reported': image_download.content_length})\n # Verify if the checksum of the streamed image is correct\n image_download.verify_image(device)\n # Fix any gpt partition\n try:\n disk_utils.fix_gpt_partition(device, node_uuid=None)\n except exception.InstanceDeployFailure:\n # Note: the catch internal to the helper method logs any errors.\n pass\n # Fix the root partition UUID\n root_uuid = disk_utils.block_uuid(device)\n LOG.info(\"%(device)s UUID is now %(root_uuid)s\",\n {'device': device, 'root_uuid': root_uuid})\n self.partition_uuids['root uuid'] = root_uuid", "def test1234():\n r = request\n #\n data = uncompress_nparr(r.data) #uncompress data\n print(\"data type:{}\", type(data))\n #nparr = np.frombuffer(r.data, np.uint8)\n\n is_success, buffer = cv2.imencode(\".jpg\", data)\n io_buf = io.BytesIO(buffer)\n decode_img = cv2.imdecode(np.frombuffer(io_buf.getbuffer(), np.uint8), -1) # image\n #img = cv2.imdecode(nparr , cv2.IMREAD_COLOR)\n img_name = \"Received_JuanJoxe{}.png\".format(img_counter)\n\n cv2.imwrite(os.path.join(uploads_dir, img_name), decode_img)\n\n #\n data10 = data*10\n print(\"\\n\\nReceived array (compressed size = \"+\\\n str(r.content_length)+\"):\\n\"+str(data))\n resp, _, _ = compress_nparr(data)\n response = {'message': 'image received. size={}x{} name:{}'.format(decode_img.shape[1], decode_img.shape[0], img_name)} #this is json\n print('message image received. 
size={}x{} name:{}'.format(decode_img.shape[1], decode_img.shape[0], img_name))\n\n\n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")", "def process_image(self):\n pass", "def gen_frame():\n while True:\n frame = camera_stream()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/png\\r\\n\\r\\n' + frame + b'\\r\\n') # concate frame one by one and show result", "def input_handler(data, context):\n\n if context.request_content_type == \"application/x-image\":\n payload = data.read()\n encoded_image = base64.b64encode(payload).decode(\"utf-8\")\n instance = [{\"b64\": encoded_image}]\n return json.dumps({\"instances\": instance})\n else:\n _return_error(\n 415, 'Unsupported content type \"{}\"'.format(context.request_content_type or \"Unknown\")\n )", "def gen():\n while True:\n retval, frame = vc.read()\n\n if retval:\n #image_processing(frame)\n frame = cv2.imencode('.jpg', frame)[1].tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def make_reply(self,request,nreplies):\n #print(\"DummyPyWorker. Sending client message back\")\n self._log.debug(\"received message with {} parts\".format(len(request)))\n\n if not self.is_model_loaded():\n self._log.debug(\"model not loaded for some reason. loading.\")\n\n try:\n import torch\n except:\n raise RuntimeError(\"could not load pytorch!\")\n\n # message pattern: [image_bson,image_bson,...]\n\n nmsgs = len(request)\n nbatches = nmsgs/self.batch_size\n\n if not self._still_processing_msg:\n self._next_msg_id = 0\n\n # turn message pieces into numpy arrays\n img2d_v = []\n sizes = []\n frames_used = []\n rseid_v = []\n for imsg in xrange(self._next_msg_id,nmsgs):\n try:\n compressed_data = str(request[imsg])\n data = zlib.decompress(compressed_data)\n c_run = c_int()\n c_subrun = c_int()\n c_event = c_int()\n c_id = c_int()\n img2d = larcv.json.image2d_from_pystring(data,\n c_run, c_subrun, c_event, c_id )\n except:\n self._log.error(\"Image Data in message part {}\\\n could not be converted\".format(imsg))\n continue\n self._log.debug(\"Image[{}] converted: {}\"\\\n .format(imsg,img2d.meta().dump()))\n\n # check if correct plane!\n if img2d.meta().plane()!=self.plane:\n self._log.debug(\"Image[{}] is the wrong plane!\".format(imsg))\n continue\n\n # check that same size as previous images\n imgsize = (int(img2d.meta().cols()),int(img2d.meta().rows()))\n if len(sizes)==0:\n sizes.append(imgsize)\n elif len(sizes)>0 and imgsize not in sizes:\n self._log.debug(\"Next image a different size. \\\n we do not continue batch.\")\n self._next_msg_id = imsg\n break\n img2d_v.append(img2d)\n frames_used.append(imsg)\n rseid_v.append((c_run.value,c_subrun.value,c_event.value,c_id.value))\n if len(img2d_v)>=self.batch_size:\n self._next_msg_id = imsg+1\n break\n\n\n # convert the images into numpy arrays\n nimgs = len(img2d_v)\n self._log.debug(\"converted msgs into batch of {} images. 
frames={}\"\n .format(nimgs,frames_used))\n np_dtype = np.float32\n img_batch_np = np.zeros( (nimgs,1,sizes[0][1],sizes[0][0]),\n dtype=np_dtype )\n\n for iimg,img2d in enumerate(img2d_v):\n meta = img2d.meta()\n img2d_np = larcv.as_ndarray( img2d )\\\n .reshape( (1,1,meta.cols(),meta.rows()))\n\n img2d_np=np.transpose(img2d_np,(0,1,3,2))\n img_batch_np[iimg,:] = img2d_np\n\n # print(\"shape of image: \",img2d_np.shape)\n\n\n # now make into torch tensor\n img2d_batch_t = torch.from_numpy( img_batch_np ).to(self.device)\n # out_batch_np = img2d_batch_t.detach().cpu().numpy()\n # out_batch_np=np.transpose(out_batch_np,(0,1,3,2))\n\n print(\"shape of image: \",img2d_batch_t.shape)\n with torch.set_grad_enabled(False):\n out_batch_np = self.model.forward(img2d_batch_t).detach().cpu().numpy()\n out_batch_np=np.transpose(out_batch_np,(0,1,3,2))\n\n\n\n # compression techniques\n ## 1) threshold values to zero\n ## 2) suppress output for non-adc values\n ## 3) use half\n\n # suppress small values\n out_batch_np[ out_batch_np<1.0e-3 ] = 0.0\n\n # threshold\n # for ich in xrange(out_batch_np.shape[1]):\n # out_batch_np[:,ich,:,:][ img_batch_np[:,0,:,:]<10.0 ] = 0.0\n\n # convert back to full precision, if we used half-precision in the net\n\n self._log.debug(\"passed images through net. output batch shape={}\"\n .format(out_batch_np.shape))\n # convert from numpy array batch back to image2d and messages\n reply = []\n for iimg in xrange(out_batch_np.shape[0]):\n img2d = img2d_v[iimg]\n rseid = rseid_v[iimg]\n meta = img2d.meta()\n\n out_np = out_batch_np[iimg,0,:,:]\n # print(\"out_np\",type(out_np))\n # print(\"meta\",type(meta))\n out_img2d = larcv.as_image2d_meta( out_np, meta )\n bson = larcv.json.as_pystring( out_img2d,\n rseid[0], rseid[1], rseid[2], rseid[3] )\n compressed = zlib.compress(bson)\n reply.append(compressed)\n\n if self._next_msg_id>=nmsgs:\n isfinal = True\n self._still_processing_msg = False\n else:\n isfinal = False\n self._still_processing_msg = True\n\n self._log.debug(\"formed reply with {} frames. 
isfinal={}\"\n .format(len(reply),isfinal))\n return reply,isfinal", "def serve_inference_requests():\n global image_queue\n\n with tf.Session() as sess:\n while True:\n image_data = image_queue.get()\n\n tensor = sess.graph.get_tensor_by_name('final_result:0')\n predictions = sess.run(tensor, {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n top_k = predictions.argsort()[-NUM_PREDICTIONS:][::-1]\n\n human_string = labels[top_k[0]]\n score = predictions[top_k[0]]\n logging.info('%s classified with score %.5f', human_string, score)\n\n emit_image = False\n if human_string != 'nothing':\n emit_image = True\n logging.debug('emitting image cause %s was detected', human_string)\n elif score <= config['inference']['threshold']:\n emit_image = True\n logging.debug('emitting image cause score %.5f is below threshold of %s',\n score, config['inference']['threshold'])\n else:\n logging.debug('image not emitted, cause nothing was detected with a probability of %.5f',\n score)\n\n if emit_image:\n mqtt_publish(image_data)\n else:\n save_image(image_data)", "def recv_img(self, filename):\n recv_data = b'' # packet of byte string data\n save_data = b'' # data to be saved to file\n img_not_recvd = True # flag to indicate if image has been recieved\n exp_seq = 0 # expected sequence number initially 0\n pkt = Packet()\n\n # get image data from client until all data received\n while True:\n try:\n if img_not_recvd:\n print(\"Client: Ready to receive image\", flush=True)\n # start = time()\n recv_data = self.client_socket.recv(self.pkt_size)\n\n pkt.pkt_unpack(recv_data)\n if pkt.seq_num != exp_seq or pkt.csum != pkt.checksum(pkt.seq_num, pkt.data):\n ack = Packet(exp_seq ^ 1, \"ACK\")\n else:\n save_data += pkt.data\n ack = Packet(exp_seq, \"ACK\")\n exp_seq ^= 1\n\n ack_pack = ack.pkt_pack()\n self.client_socket.sendto(ack_pack, self.server_addr)\n\n if img_not_recvd:\n img_not_recvd = False # img data began streaming if it reaches this point\n\n except socket.timeout:\n # if image not recieved yet, keep waiting\n if img_not_recvd:\n pass\n # image has been recieved\n else:\n # write data into a file\n # end = time()\n # print(\"Client: Time to receive image:\", end - start - 2)\n with open(filename, 'wb+') as server_img:\n server_img.write(save_data)\n print(\"Client: Received and saved image\", flush=True)\n break # exit loop", "def process(self, image):", "def main():\n # Set up socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('localhost', 12345))\n dat = b''\n dataSegement = [0] * 5\n\n while True:\n seg, addr = s.recvfrom(MAX_DGRAM)\n print(\"type: \", type(seg))\n chunk_number = struct.unpack(\"B\", seg[0:1])[0]\n if chunk_number > 1:\n print(\"chunk_number: \", chunk_number)\n dat += seg[1:]\n else:\n dat += seg[1:]\n img = cv2.imdecode(np.frombuffer(dat, dtype=np.uint8), 1)\n cv2.imwrite(\"image/4k_image_sample_compressed.jpg\", img)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n dat = b\"\"", "def gen(camera):\n \n while True:\n \n \n \n frame = camera.get_frame()\n \n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def download_images(self):\n # download the json for the thread\n self.download_json()\n\n # open the json file\n with self.file.open('r', encoding=\"utf-8\") as json_file:\n # load into data\n data = json.load(json_file)\n\n # grab the posts from\n posts = data[\"posts\"]\n\n # iterate through posts in the thread\n for post_num in range(len(posts)):\n # grab the current post\n post = 
posts[post_num]\n\n # try to get these attributes. may throw an error because not\n # all posts or replies have images attached\n try:\n # images should have these attributes\n tim = post[\"tim\"]\n ext = post[\"ext\"]\n width = post[\"w\"]\n height = post[\"h\"]\n desired_size = False\n\n # filename consists of \"tim.ext\"\n image_filename = str(tim) + str(ext)\n\n # set resolution based on bool arguments\n if self.sd:\n self.min_width = 720\n self.min_height = 480\n if self.hd:\n self.min_width = 1280\n self.min_height = 720\n if self.fhd:\n self.min_width = 1920\n self.min_height = 1080\n if self.uhd:\n self.min_width = 3840\n self.min_height = 2160\n\n # check if the image is the desired size\n if (height <= self.max_height and height >= self.min_height\n ) and (width <= self.max_width\n and width >= self.min_width):\n desired_size = True\n\n if desired_size:\n try:\n # request image variables\n image_url = self.images_endpoint + image_filename\n image_res = requests.get(image_url)\n image_content = image_res.content\n\n # local image variables\n image_string = str(self.images_path.absolute()) + \\\n \"\\\\\" + image_filename\n image_file = Path(image_string)\n\n # write to disk\n if self.verbose:\n print(\"Downloading\", image_url, \"to\",\n image_string, \"from thread\",\n self.thread_id, \"with a resolution of\",\n width, \"x\", height)\n with image_file.open(\"wb\") as im_file:\n im_file.write(image_content)\n except KeyboardInterrupt:\n sys.exit(1)\n\n except KeyError:\n pass", "def serve_image_fn(image_tensor):\n return model(image_tensor)", "def _work(self, payload):\n pdf_path = payload['pdf_path']\n jpeg_prefixes = payload['jpeg_prefixes']\n\n webhook_url = payload['webhook_url']\n webhook_data = payload['webhook_data']\n\n page_start = payload['page_start']\n page_end = payload['page_end']\n\n self._log('Connecting to s3')\n connection, bucket = self._connect_to_s3(payload['s3'])\n\n # create batches of pages to convert\n for batch_first_page in range(page_start, page_end + 1, self.PAGES_PER_BATCH):\n batch_last_page = min(batch_first_page + self.PAGES_PER_BATCH - 1, page_end)\n batch_pages = range(batch_first_page, batch_last_page + 1)\n\n batch_jpeg_prefixes = jpeg_prefixes[batch_first_page - page_start:\n batch_last_page + 1 - page_start]\n self._convert_batch(bucket, pdf_path, batch_pages, batch_jpeg_prefixes,\n webhook_url, webhook_data)", "def image_server():\n yield from http_server_thread(ImageHandler)", "def create_image_stream(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_image_stream\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_image_stream`\")\n\n resource_path = '/oapi/v1/imagestreams'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del 
header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1ImageStream',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def receive_images(p_image_queue,p_new_im_id_queue, host = \"127.0.0.1\", port = 6200, timeout = 20, VERBOSE = True,worker_num = 0):\n \n # open ZMQ socket\n context = zmq.Context()\n sock = context.socket(zmq.SUB)\n sock.connect(\"tcp://{}:{}\".format(host, port))\n sock.subscribe(b'') # subscribe to all topics on this port (only images)\n \n print (\"w{}: Image receiver thread connected to socket.\".format(worker_num))\n \n # main receiving loop\n prev_time = time.time()\n while time.time() - prev_time < timeout:\n try:\n temp = sock.recv_pyobj(zmq.NOBLOCK)\n (name,im) = pickle.loads(temp)\n p_image_queue.put((name,im,time.time())) \n p_new_im_id_queue.put(name)\n prev_time = time.time()\n if VERBOSE: print(\"w{}: Image receiver thread received image {} at {}\".format(worker_num,name,time.ctime(prev_time)))\n except zmq.ZMQError:\n time.sleep(0.1)\n pass\n \n sock.close()\n print (\"w{}: Image receiver thread closed socket.\".format(worker_num))", "def get_data(self):\n global CAM\n while CAM.isOpened():\n _, frame = CAM.read()\n _, jpeg = cv2.imencode('.jpg', frame)\n encoded_img = \"data:image/jpg;base64,\" + str(base64.b64encode(jpeg.tobytes()).decode())\n SIO.emit('video_frame',\n {'frame': encoded_img},\n namespace='/live-stream')\n sleep(self.delay)" ]
[ "0.62036604", "0.59223443", "0.58639634", "0.58136076", "0.5789331", "0.57704866", "0.5751171", "0.5749711", "0.57081705", "0.5689918", "0.56880987", "0.5680199", "0.56609046", "0.56456715", "0.56061023", "0.5568508", "0.5554283", "0.5522024", "0.5515289", "0.55122995", "0.5509296", "0.5493851", "0.54872143", "0.5473153", "0.54683566", "0.5467544", "0.5452022", "0.5414295", "0.5413903", "0.5405614" ]
0.60920733
1
generate and return a layer dict showing techniques used by software. If softwaretype is specified as "malware" or "tool", only shows software of that type. If softwaretype is specified as "software", the output layer shows both malware and tools
def generate(softwaretype="software"): # import the STIX data from MITRE/CTI stix = requests.get("https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json").json() ms = MemoryStore(stix_data=stix["objects"]) # software includes malware and tool types so perform two queries and merge the results software_filters = [] if softwaretype == "malware" or softwaretype == "software": software_filters.append( [ Filter('type', '=', 'malware') ] ) if softwaretype == "tool" or softwaretype == "software": software_filters.append( [ Filter('type', '=', 'tool') ] ) software = list(chain.from_iterable( ms.query(f) for f in software_filters )) # build a list of techniques used by software techniques_used = {} #attackID => using software names for thesoftware in software: # filter out revoked and deprecated software if ("x_mitre_deprecated" in thesoftware and thesoftware["x_mitre_deprecated"]) or ("revoked" in thesoftware and thesoftware["revoked"]): continue for relationship in ms.relationships(thesoftware["id"]): # skip all non-technique relationships if "attack-pattern" not in relationship["target_ref"]: continue technique = ms.get(relationship["target_ref"]) # filter out deprecated and revoked techniques if ("x_mitre_deprecated" in technique and technique["x_mitre_deprecated"]) or ("revoked" in technique and technique["revoked"]): continue techniqueID = technique["external_references"][0]["external_id"] # store usage in techniques_used struct if techniqueID in techniques_used: techniques_used[techniqueID].append(thesoftware["name"]) else: techniques_used[techniqueID] = [thesoftware["name"]] # format the techniques for the output layer techniques_list = [] highest_usage = 0 lowest_usage = 1 for techniqueID in techniques_used: # determine the number of used techniques for the score count = len(techniques_used[techniqueID]) highest_usage = max(highest_usage, count) lowest_usage = min(lowest_usage, count) # append technique struct to list of layer-formatted techniques techniques_list.append({ "techniqueID": techniqueID, "comment": "executed by " + ", ".join(techniques_used[techniqueID]), "score": count, }) # set up layer name and desc according to softwaretype if softwaretype != "software": plural = "tools" if softwaretype == "tool" else "malware" layername = f"Software ({softwaretype}) Execution" layerdescription = f"All techniques that can be executed by software of subtype {softwaretype}, where the score is the count of {plural} using the technique" else: layername = "Software Execution" layerdescription = f"All techniques that can be executed by software, where the score is the count of software using the technique" # construct and return the layer as a dict return { "name": layername, "description": layerdescription, "versions": { "layer": "4.1", "navigator": "4.1" }, "domain": "enterprise-attack", "techniques": techniques_list, "sorting": 3, # order in descending order of score (count) "gradient": { "colors": [ "#fff7b3", # low counts are yellow "#ff6666", # high counts are red ], "minValue": lowest_usage, "maxValue": highest_usage }, }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_software(\n client: Act, matrice: AttckMatrice, output_format: Text = \"json\"\n) -> List:\n\n notify: List = []\n\n # Enterprise matrice has malwares and tools, but preattack has none of them\n for software in getattr(matrice, \"malwares\", []) + getattr(matrice, \"tools\", []):\n if deprecated_or_revoked(software):\n # Object is revoked/deprecated, add to notification list but do not add to facts that should be added to the platform\n notify.append(software)\n continue\n\n tool_name = software.name\n\n # Tool category\n handle_fact(\n client.fact(\"category\", software.type).source(\"tool\", tool_name),\n output_format=output_format,\n )\n\n for alias in software.alias:\n alias_name = alias\n\n if tool_name != alias_name:\n # Tool category (alias)\n handle_fact(\n client.fact(\"category\", software.type).source(\"tool\", alias_name),\n output_format=output_format,\n )\n handle_fact(\n client.fact(\"alias\").bidirectional(\n \"tool\", tool_name, \"tool\", alias_name\n ),\n output_format=output_format,\n )\n\n for technique in software.techniques:\n handle_fact(\n client.fact(\"implements\")\n .source(\"tool\", software.name)\n .destination(\"technique\", technique.id),\n output_format=output_format,\n )\n\n return notify", "def get_techniques_used_by_tools():\n global techniques_used_by_tools\n\n if not techniques_used_by_tools:\n techniques_used_by_tools = rsh.techniques_used_by_tools(get_srcs())\n \n return techniques_used_by_tools", "def get_tools_using_technique():\n global tools_using_technique\n\n if not tools_using_technique:\n tools_using_technique = rsh.tools_using_technique(get_srcs())\n \n return tools_using_technique", "def deduce_software(self,\n job_type: Optional[str] = None):\n\n # OneDMin\n if job_type == 'onedmin':\n if 'onedmin' not in supported_ess:\n raise ValueError(f'Could not find the OneDMin software to compute Lennard-Jones parameters.\\n'\n f'levels_ess is:\\n{levels_ess}')\n self.software = 'onedmin'\n\n # QChem\n if job_type == 'orbitals':\n # currently we only have a script to print orbitals on QChem,\n # could/should be elaborated to additional ESS\n if 'qchem' not in supported_ess:\n raise ValueError(f'Could not find the QChem software to compute molecular orbitals.\\n'\n f'levels_ess is:\\n{levels_ess}')\n self.software = 'qchem'\n\n # Orca\n if 'dlpno' in self.method:\n if 'orca' not in supported_ess:\n raise ValueError(f'Could not find Orca to run a DLPNO job.\\nlevels_ess is:\\n{levels_ess}')\n self.software = 'orca'\n\n # Gaussian\n if self.method_type == 'composite' or job_type == 'composite' or job_type == 'irc' \\\n or any([sum(['iop' in value.lower() for value in subdict.values()]) for subdict in self.args.values()]):\n if 'gaussian' not in supported_ess:\n raise ValueError(f'Could not find Gaussian to run the {self.method}.\\n'\n f'levels_ess is:\\n{levels_ess}')\n self.software = 'gaussian'\n\n # TorchANI\n if 'torchani' in self.method:\n self.software = 'torchani'\n\n # xTB\n if 'xtb' in self.method or 'gfn' in self.method:\n self.software = 'xtb'\n\n # User phrases from settings (levels_ess)\n if self.software is None:\n for ess, phrase_list in levels_ess.items():\n for phrase in phrase_list:\n if self.software is None and \\\n (phrase in self.method or self.basis is not None and phrase in self.basis):\n self.software = ess.lower()\n\n if self.software is None:\n preferred_ess_order = ['gaussian', 'qchem', 'orca', 'molpro', 'terachem', 'cfour']\n\n if self.method_type in ['force_field', 'semiempirical']:\n preferred_ess_order = 
['gaussian', 'qchem', 'orca', 'molpro', 'terachem']\n elif self.method_type in ['wavefunction']:\n preferred_ess_order = ['molpro', 'gaussian', 'orca', 'cfour', 'qchem']\n elif self.method_type in ['composite']:\n preferred_ess_order = ['gaussian']\n elif self.method_type in ['dft']:\n preferred_ess_order = ['gaussian', 'qchem', 'terachem', 'orca']\n\n self.determine_compatible_ess()\n relevant_software = get_ordered_intersection_of_two_lists(self.compatible_ess, supported_ess)\n self.software = get_ordered_intersection_of_two_lists(preferred_ess_order, relevant_software)[0] \\\n if relevant_software else None", "def get_technique_stats(db):\n \n techniques = {}\n for element in db:\n for technique in element['techniques_used']:\n if technique not in techniques:\n temp_dict = {technique: [{u'id': element['project_no'], \n u'name': element['project_name'], u'description': element['short_description']}]}\n techniques.update(temp_dict)\n else:\n temp_dict = {u'id': element['project_no'],\n u'name': element['project_name'], u'description': element['short_description']}\n techniques[technique].append(temp_dict)\n techniques[technique] = sorted(techniques[technique], key=itemgetter('name'))\n\n return techniques", "def get_techniques_used_by_malware():\n global techniques_used_by_malware\n \n if not techniques_used_by_malware:\n techniques_used_by_malware = rsh.techniques_used_by_malware(get_srcs())\n \n return techniques_used_by_malware", "def visualization_softwares(text):\n t = ['power bi', 'powerbi', 'tableau', 'd3', 'qlikview', 'datawrapper']\n for i in t:\n x = re.search(i, text.lower())\n if x:\n result = True\n else:\n result = False\n return result", "def get(self):\n print(\"print tools!\")\n tools = get_tools()\n # print(tools[0].supportedDataTypes)\n\n return tools, 200", "def outputs_for_module_type_operation(self):\n result = []\n module = type(self).__name__\n turbine_rating_MW = self.input_dict['turbine_rating_MW']\n num_turbines = self.input_dict['num_turbines']\n project_size_kw = num_turbines * turbine_rating_MW * 1000\n\n if self.in_distributed_mode:\n result.append({\n 'type_of_cost': 'total_management_cost',\n 'raw_cost': self.output_dict['total_management_cost']\n })\n\n else:\n result.append({\n 'type_of_cost': 'insurance',\n 'raw_cost': self.output_dict['insurance_usd']\n })\n result.append({\n 'type_of_cost': 'Construction Permitting',\n 'raw_cost': self.output_dict['construction_permitting_usd']\n })\n result.append({\n 'type_of_cost': 'Project Management',\n 'raw_cost': self.output_dict['project_management_usd']\n })\n result.append({\n 'type_of_cost': 'Bonding',\n 'raw_cost': self.output_dict['bonding_usd']\n })\n result.append({\n 'type_of_cost': 'Markup Contingency',\n 'raw_cost': self.output_dict['markup_contingency_usd']\n })\n result.append({\n 'type_of_cost': 'Engineering Foundation and Collections System (includes met mast)',\n 'raw_cost': self.output_dict['engineering_usd']\n })\n result.append({\n 'type_of_cost': 'Site Facility',\n 'raw_cost': self.output_dict['site_facility_usd']\n })\n\n for _dict in result:\n _dict['turbine_rating_MW'] = self.input_dict['turbine_rating_MW']\n _dict['num_turbines'] = self.input_dict['num_turbines']\n _dict['rotor_diameter_m'] = self.input_dict['rotor_diameter_m']\n _dict['project_id_with_serial'] = self.project_name\n _dict['operation_id'] = 'Management'\n _dict['module'] = module\n _dict['raw_cost_total_or_per_turbine'] = 'total'\n _dict['cost_per_turbine'] = _dict['raw_cost'] / num_turbines\n _dict['cost_per_project'] = 
_dict['raw_cost']\n _dict['usd_per_kw_per_project'] = _dict['raw_cost'] / project_size_kw\n\n return result", "def tech_specific_to_dataset(model_run):\n data_dict = collections.defaultdict(lambda: {\"dims\": [\"techs\"], \"data\": []})\n\n systemwide_constraints = set(\n [\n k.split(\".\")[-1]\n for k in model_run.techs.keys_nested()\n if \".constraints.\" in k and k.endswith(\"_systemwide\")\n ]\n )\n\n for tech in model_run.sets[\"techs\"]:\n if tech in model_run.sets[\"techs_transmission\"]:\n tech = tech.split(\":\")[0]\n data_dict[\"colors\"][\"data\"].append(\n model_run.techs[tech].get_key(\"essentials.color\")\n )\n data_dict[\"inheritance\"][\"data\"].append(\n \".\".join(model_run.techs[tech].get_key(\"inheritance\"))\n )\n data_dict[\"names\"][\"data\"].append(\n # Default to tech ID if no name is set\n model_run.techs[tech].get_key(\"essentials.name\", tech)\n )\n for k in systemwide_constraints:\n data_dict[k][\"data\"].append(\n model_run.techs[tech].constraints.get_key(k, np.nan)\n )\n\n return data_dict", "def add_techniques(\n client: Act, matrice: AttckMatrice, output_format: Text = \"json\"\n) -> List:\n\n notify = []\n\n for technique in matrice.techniques:\n notify += handle_techniques(client, technique, None, output_format)\n\n for subtechnique in getattr(technique, \"subtechniques\", []):\n # Pre Attack does not have sub techniques\n notify += handle_techniques(client, subtechnique, technique, output_format)\n\n return notify", "def get_techniques_used_by_groups():\n global techniques_used_by_groups\n\n if not techniques_used_by_groups:\n techniques_used_by_groups = rsh.techniques_used_by_groups(get_srcs())\n\n return techniques_used_by_groups", "def get_malware_using_technique():\n global malware_using_technique\n\n if not malware_using_technique:\n malware_using_technique = rsh.malware_using_technique(get_srcs())\n \n return malware_using_technique", "def _its_tool_ ( self , typename , name = None , interface = None , createIf = True , parent = None ) :\n if not name :\n t,s,n = typename.rpartition('/')\n if t and s and n :\n typename = t\n name = n\n else : name = typename\n \n p1 = typename.find ( ':PUBLIC' )\n if 0 < p1 and p1 + 6 == len(typename) :\n typename = typename [:p1]\n \n itool = GaudiPython.Bindings.Helper.tool ( self._its , typename , name , parent , createIf ) \n if itool and interface :\n iif = GaudiPython.Bindings.InterfaceCast(interface)( itool )\n if not iif : logger.warning(\"Can't retrieve proepr interface %s for %s\" % ( interface , itool ) )\n return GaudiPython.Bindings.iAlgTool ( itool.name() , iif )\n elif not itool : logger.warning(\"Can't retrieve the tool %s'%s\" % ( typename , name ) )\n \n return GaudiPython.Bindings.iAlgTool ( name , itool )", "def __initAvailableLayerTypes(self):\n from backend.caffe.path_loader import PathLoader\n caffe = PathLoader().importCaffe()\n layerNameMainParts = list(caffe.layer_type_list())\n\n res = {}\n paramsPerLayerType = {}\n\n # calculate common parameters of all layer types\n # by removing all which will be used for one specific layer type only\n # also keep in mind which ones have been removed to readd them to specific layers\n commonParams = self._availableParameterGroupDescriptors[\"LayerParameter\"].parameter() #use .parameter() on purpose\n layerSpecificParameters = set()\n for nameMainPart in layerNameMainParts:\n specificParamsName = [nameMainPart + \"Parameter\"]\n if moreLayerNameParameter.has_key(nameMainPart):\n specificParamsName.append( moreLayerNameParameter[nameMainPart])\n 
paramsPerLayerType[nameMainPart] = {}\n for key, value in commonParams.items():\n if value.isParameterGroup() and value.parameterName() in specificParamsName:\n paramsPerLayerType[nameMainPart][key] = value\n layerSpecificParameters.add(key)\n\n\n # special case: shared params for loss layers\n key = \"loss_param\"\n value = commonParams[key]\n del commonParams[key]\n for nameMainPart in layerNameMainParts:\n if LayerType.getCategoryByName(nameMainPart) == LayerType.CATEGORY_LOSS:\n paramsPerLayerType[nameMainPart][key] = value\n\n # TODO is there a special case for the TransformationParameter?\n\n # create each layer type after one another\n for nameMainPart in layerNameMainParts:\n\n # add common params to the specific ones\n layerTypeParam = paramsPerLayerType[nameMainPart].keys()\n paramsPerLayerType[nameMainPart].update(commonParams)\n\n irrelevant = layerSpecificParameters.difference(layerTypeParam)\n res[nameMainPart] = LayerType(nameMainPart, paramsPerLayerType[nameMainPart], layerTypeParam, irrelevant)\n\n self._commonParams = commonParams\n self._availableLayerTypes = res", "def get_software_by_id(self, id_code):\r\n malware_return = self.fs.query([\r\n Filter('type', '=', 'malware'),\r\n Filter('external_references.external_id', '=', id_code)\r\n ])\r\n\r\n tool_return = self.fs.query([\r\n Filter('type', '=', 'tool'),\r\n Filter('external_references.external_id', '=', id_code)\r\n ])\r\n\r\n if malware_return:\r\n return malware_return\r\n elif tool_return:\r\n return tool_return", "def get_techniques(db):\n\n techniques = []\n for element in db:\n for technique in element['techniques_used']:\n if technique not in techniques:\n techniques.append(technique)\n \n return sorted(techniques)", "def workflowLessTypes(self):\n\n tools = [c.getName() for c in\n self.atgenerator.getGeneratedTools(self.package)\n if not\n utils.isTGVFalse(c.getTaggedValue('autoinstall'))]\n tools.sort()\n return tools", "def generate_technique_md(technique, domain, side_menu_data, tactic_list):\n\n attack_id = util.get_attack_id(technique)\n\n # Only add technique if the attack id was found\n if attack_id:\n\n technique_dict = {}\n\n technique_dict['attack_id'] = attack_id\n technique_dict['domain'] = domain.split(\"-\")[0]\n technique_dict['menu'] = side_menu_data\n technique_dict['name'] = technique.get('name')\n\n # Get capecs and mtcs\n for ref in technique['external_references']:\n if ref.get('source_name'):\n if ref['source_name'] == \"capec\":\n if technique_dict.get('capecs') is None:\n technique_dict['capecs'] = []\n capec_dict = {\n 'id': ref['external_id'],\n 'url': ref['url']\n }\n technique_dict['capecs'].append(capec_dict)\n\n if ref['source_name'] == \"NIST Mobile Threat Catalogue\":\n if technique_dict.get('mtcs') is None:\n technique_dict['mtcs'] = []\n\n mtcs_dict = {\n 'id': ref['external_id'],\n 'url': ref['url']\n }\n technique_dict['mtcs'].append(mtcs_dict)\n\n # Get technique external references \n ext_ref = technique[\"external_references\"]\n\n # Get initial reference list\n reference_list = []\n # Decleared as an object to be able to pass by reference\n next_reference_number = {}\n next_reference_number['value'] = 1\n reference_list = util.update_reference_list(reference_list, technique)\n\n # Get technique description\n if technique.get(\"description\"):\n citations_from_descr = util.get_citations_from_descr(technique['description'])\n\n technique_dict['descr'] = util.replace_html_chars(markdown.markdown(technique['description']))\n technique_dict['descr'] = 
util.filter_urls(technique_dict['descr'])\n technique_dict['descr'] = util.get_descr_reference_sect(citations_from_descr, reference_list, next_reference_number, technique_dict['descr'])\n \n if 'x_mitre_deprecated' in technique:\n technique_dict['deprecated'] = True\n \n # Get mitigation table\n technique_dict['mitigation_table'] = get_mitigations_table_data(technique, reference_list, next_reference_number)\n \n # Get related techniques\n technique_dict['rel_techniques_table'] = get_related_techniques_data(technique, tactic_list)\n\n # Get examples\n technique_dict['examples_table'] = get_examples_table_data(technique, reference_list, next_reference_number)\n\n # Get technique version\n if technique.get(\"x_mitre_version\"):\n technique_dict['version'] = technique[\"x_mitre_version\"]\n\n # Get tactics of technique\n if technique.get('kill_chain_phases'):\n technique_dict['tactics'] = []\n for elem in technique['kill_chain_phases']:\n technique_dict['tactics'].append(elem['phase_name'].title().replace('-', ' '))\n\n # Get platforms that technique uses\n if technique.get('x_mitre_platforms'):\n technique_dict['platforms'] = \", \".join(technique['x_mitre_platforms'])\n\n # Get system requirements\n if technique.get('x_mitre_system_requirements'):\n technique_dict['sysreqs'] = \", \".join(technique['x_mitre_system_requirements'])\n technique_dict['sysreqs'] = re.sub(\"\\.?\\\\n+\", \"; \", technique_dict['sysreqs'])\n\n # Get permissions required\n if technique.get('x_mitre_permissions_required'):\n technique_dict['perms'] = \", \".join(technique['x_mitre_permissions_required'])\n\n # Get effective permissions\n if technique.get('x_mitre_effective_permissions'):\n technique_dict['eff_perms'] = \", \".join(technique['x_mitre_effective_permissions'])\n\n # Get data sources\n if technique.get('x_mitre_data_sources'):\n technique_dict['data_sources'] = \", \".join(technique['x_mitre_data_sources'])\n\n # Get if technique supports remote\n if technique.get('x_mitre_remote_support'):\n if technique['x_mitre_remote_support']:\n technique_dict['supports_remote'] = \" Yes\"\n else:\n technique_dict['supports_remote'] = \" No\"\n\n # Get network requirements\n if technique.get('x_mitre_network_requirements'):\n if technique['x_mitre_network_requirements']:\n technique_dict['network_reqs'] = \" Yes\"\n else:\n technique_dict['network_reqs'] = \" No\"\n\n # Get list of impacts\n if technique.get('x_mitre_impact_type'):\n technique_dict['impact_type'] = \", \".join(technique['x_mitre_impact_type'])\n\n # Get list of defenses bypassed\n if technique.get('x_mitre_defense_bypassed'):\n technique_dict['def_bypass'] = \", \".join(technique['x_mitre_defense_bypassed'])\n\n # Get list of contributors \n if technique.get('x_mitre_contributors'):\n technique_dict['contributors'] = \"; \".join(technique['x_mitre_contributors'])\n\n # Get list of tactic types\n if technique.get('x_mitre_tactic_type'):\n technique_dict['tactic_type'] = \", \".join(technique['x_mitre_tactic_type'])\n\n # Get detection data\n if technique.get('x_mitre_detection'):\n technique_dict['detection'] = get_detection_string(technique['x_mitre_detection'], reference_list, next_reference_number)\n\n # Get if technique is detectable by common defenses\n if technique.get('x_mitre_detectable_by_common_defenses'):\n technique_dict['detectable'] = technique.get('x_mitre_detectable_by_common_defenses')\n\n # Get explanation of detecatable by common defenses\n if technique.get('x_mitre_detectable_by_common_defenses_explanation'):\n 
technique_dict['detectable_exp'] = util.replace_html_chars(technique['x_mitre_detectable_by_common_defenses_explanation'])\n\n # Get diffulty for adversaries\n if technique.get('x_mitre_difficulty_for_adversary'):\n technique_dict['diff_for_adv'] = technique['x_mitre_difficulty_for_adversary']\n\n # Get explanation of difficulty for adversaries\n if technique.get('x_mitre_difficulty_for_adversary_explanation'):\n technique_dict['diff_for_adv_exp'] = util.replace_html_chars(technique['x_mitre_difficulty_for_adversary_explanation']) \n \n # Add reference for bottom part of technique page\n if reference_list:\n technique_dict['bottom_ref'] = util.sort_reference_list(reference_list)\n\n subs = config.technique_md.substitute(technique_dict)\n subs = subs + json.dumps(technique_dict)\n\n #Write out the markdown file\n with open(os.path.join(config.techniques_markdown_path, technique_dict['attack_id'] +\".md\"), \"w\", encoding='utf8') as md_file:\n md_file.write(subs)", "def test_model_layer_types_ww2x(self):\n \n\t\tdetails = self.watcher.describe(pool=False, min_evals=1)\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\n \t\t\t\n\t\n\t\tconv2DLayers = details[details.layer_type==str(LAYER_TYPE.CONV2D)]\n\t\tconv2DCount = len(conv2DLayers)\n\t\tself.assertEqual(conv2DCount, 8*9, \"8*9 conv2D layers, but {} found\".format(denseCount))", "def supportedSoftwares():\n return [\"any\"]", "def supportedSoftwares():\n return [\"any\"]", "def generate_tools_list():\n out = {}\n\n # Set BETYDB_LOCAL_CACHE_FOLDER = /tools directory\n print(\"Dumping BETY experiments file into \"+os.environ.get('BETYDB_LOCAL_CACHE_FOLDER', \"/home/extractor/\"))\n #dump_experiments()\n\n toollist = [\n \"bin2tif.py\",\n \"nrmac.py\",\n \"canopyCover.py\",\n \"fieldmosaic.py\",\n \"submit_clowder.py\",\n \"submit_bety.py\",\n \"submit_geo.py\",\n \"bety_experiments.json\"\n ]\n\n print(\"Including /tools directory files\")\n for t in toollist:\n #tool_daxf = create_daxf(t, os.path.join(\"tests/workflow/workflow-pilot/workflow_terra/tools\", t))\n tool_daxf = create_daxf(t, os.path.join(os.getcwd(), \"tools\", t))\n # Use filename as dict key in case we need it as input later\n out[t] = tool_daxf\n\n sensor_metadata_list = [\n \"ua-mac/sensor-metadata/sensors/stereo/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/flirIrCamera/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/scanner3D/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/VNIR/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/scanalyzer/sensor_fixed_metadata.json\"\n ]\n print(\"Including sensor fixed metadata\")\n for s in sensor_metadata_list:\n sensor_metadata_daxf = create_daxf(s, os.path.join(sites_dir, s))\n # Use '$SENSOR_fixed' as dict key in case we need it as input later\n out[s.split(\"/\")[-2]+\"_fixed\"] = sensor_metadata_daxf\n\n return out", "def type_for(data):\n switcher = {\n # Startup\n \"FileHeader\": models.FileHeader,\n \"ClearSavedGame\": models.ClearSavedGame,\n \"NewCommander\": models.NewCommander,\n \"LoadGame\": models.LoadGame,\n \"Progress\": models.Progress,\n \"Rank\": models.Rank,\n # Travel\n \"Docked\": models.Docked,\n \"DockingCancelled\": models.DockingCancelled,\n \"DockingDenied\": models.DockingDenied,\n \"DockingGranted\": models.DockingGranted,\n \"DockingRequested\": models.DockingRequested,\n 
\"DockingTimeout\": models.DockingTimeout,\n \"FSDJump\": models.FSDJump,\n \"Liftoff\": models.Liftoff,\n \"Location\": models.Location,\n \"SupercruiseEntry\": models.SupercruiseEntry,\n \"SupercruiseExit\": models.SupercruiseExit,\n \"Touchdown\": models.Touchdown,\n \"Undocked\": models.Undocked,\n # Combat\n \"Bounty\": models.Bounty,\n \"CapShipBond\": models.CapShipBond,\n \"Died\": models.Died,\n \"EscapeInterdiction\": models.EscapeInterdiction,\n \"FactionKillBond\": models.FactionKillBond,\n \"HeatDamage\": models.HeatDamage,\n \"HeatWarning\": models.HeatWarning,\n \"HullDamage\": models.HullDamage,\n \"Interdicted\": models.Interdicted,\n \"Interdiction\": models.Interdiction,\n \"PVPKill\": models.PVPKill,\n \"ShieldState\": models.ShieldState,\n # Exploration\n \"Scan\": models.Scan,\n \"MaterialCollected\": models.MaterialCollected,\n \"MaterialDiscarded\": models.MaterialDiscarded,\n \"MaterialDiscovered\": models.MaterialDiscovered,\n \"BuyExplorationData\": models.BuyExplorationData,\n \"SellExplorationData\": models.SellExplorationData,\n \"Screenshot\": models.Screenshot,\n # Trade\n \"BuyTradeData\": models.BuyTradeData,\n \"CollectCargo\": models.CollectCargo,\n \"EjectCargo\": models.EjectCargo,\n \"MarketBuy\": models.MarketBuy,\n \"MarketSell\": models.MarketSell,\n \"MiningRefined\": models.MiningRefined,\n # Station Services\n \"BuyAmmo\": models.BuyAmmo,\n \"BuyDrones\": models.BuyDrones,\n \"CommunityGoalDiscard\": models.CommunityGoalDiscard,\n \"CommunityGoalJoin\": models.CommunityGoalJoin,\n \"CommunityGoalReward\": models.CommunityGoalReward,\n \"CrewAssign\": models.CrewAssign,\n \"CrewFire\": models.CrewFire,\n \"CrewHire\": models.CrewHire,\n \"EngineerApply\": models.EngineerApply,\n \"EngineerCraft\": models.EngineerCraft,\n \"EngineerProgress\": models.EngineerProgress,\n \"FetchRemoteModule\": models.FetchRemoteModule,\n \"MassModuleStore\": models.MassModuleStore,\n \"MissionAbandoned\": models.MissionAbandoned,\n \"MissionAccepted\": models.MissionAccepted,\n \"MissionCompleted\": models.MissionCompleted,\n \"MissionFailed\": models.MissionFailed,\n \"ModuleBuy\": models.ModuleBuy,\n \"ModuleRetrieve\": models.ModuleRetrieve,\n \"ModuleSell\": models.ModuleSell,\n \"ModuleSellRemote\": models.ModuleSellRemote,\n \"ModuleStore\": models.ModuleStore,\n \"ModuleSwap\": models.ModuleSwap,\n \"PayFines\": models.PayFines,\n \"PayLegacyFines\": models.PayLegacyFines,\n \"RedeemVoucher\": models.RedeemVoucher,\n \"RefuelAll\": models.RefuelAll,\n \"RefuelPartial\": models.RefuelPartial,\n \"Repair\": models.Repair,\n \"RepairAll\": models.RepairAll,\n \"RestockVehicle\": models.RestockVehicle,\n \"ScientificResearch\": models.ScientificResearch,\n \"SellDrones\": models.SellDrones,\n \"ShipyardBuy\": models.ShipyardBuy,\n \"ShipyardNew\": models.ShipyardNew,\n \"ShipyardSell\": models.ShipyardSell,\n \"ShipyardTransfer\": models.ShipyardTransfer,\n \"ShipyardSwap\": models.ShipyardSwap,\n # Powerplay\n \"PowerplayCollect\": models.PowerplayCollect,\n \"PowerplayDefect\": models.PowerplayDefect,\n \"PowerplayDeliver\": models.PowerplayDeliver,\n \"PowerplayFastTrack\": models.PowerplayFastTrack,\n \"PowerplayJoin\": models.PowerplayJoin,\n \"PowerplayLeave\": models.PowerplayLeave,\n \"PowerplaySalary\": models.PowerplaySalary,\n \"PowerplayVote\": models.PowerplayVote,\n \"PowerplayVoucher\": models.PowerplayVoucher,\n # Other Events\n \"ApproachSettlement\": models.ApproachSettlement,\n \"CockpitBreached\": models.CockpitBreached,\n \"CommitCrime\": 
models.CommitCrime,\n \"Continued\": models.Continued,\n \"DatalinkScan\": models.DatalinkScan,\n \"DatalinkVoucher\": models.DatalinkVoucher,\n \"DataScanned\": models.DataScanned,\n \"DockFighter\": models.DockFighter,\n \"DockSRV\": models.DockSRV,\n \"FuelScoop\": models.FuelScoop,\n \"JetConeBoost\": models.JetConeBoost,\n \"JetConeDamage\": models.JetConeDamage,\n \"LaunchFighter\": models.LaunchFighter,\n \"LaunchSRV\": models.LaunchSRV,\n \"Promotion\": models.Promotion,\n \"RebootRepair\": models.RebootRepair,\n \"ReceiveText\": models.ReceiveText,\n \"Resurrect\": models.Resurrect,\n \"SelfDestruct\": models.SelfDestruct,\n \"SendText\": models.SendText,\n \"Synthesis\": models.Synthesis,\n \"USSDrop\": models.USSDrop,\n \"VehicleSwitch\": models.VehicleSwitch,\n \"WingAdd\": models.WingAdd,\n \"WingJoin\": models.WingJoin,\n \"WingLeave\": models.WingLeave,\n }\n return switcher.get(data[\"event\"], models.BaseModel)", "def is_software(self):\n return self._is_name_type(self.SOFTWARE)", "def all_net(configuration):\n net_dict_all = {\n \"design\" : ['H1', 'L1', 'V1' ],\n \"GW170817\" : ['H1', 'L1', 'V1' ],\n \"GW170814\" : ['H1', 'L1', 'V1' ],\n \"GW170817_without_Virgo\" : ['H1', 'L1' ],\n \"ET\" : [\"ET_L_Eu\", \"ET_L_Eu_2\"], # Triangular ET\n \"ET1\" : ['H1', 'L1', 'V1', 'ETdet1', 'ETdet2' ], # Triangular ET +LVC\n \"ET2\" : ['H1', 'L1', 'V1', 'ETdet1', 'ETdet3' ], # L-shaped at 2 places +LVC\n \"ET3\" : ['ETdet1', 'ETdet3', 'ETdet4'], # 3 L-shaped ET at three different places\n \"ET3L_EU\" : [\"ET_L_Eu\", \"ET_L_Aus_Eu\", \"ET_L_Argentina\"],\n \"3ET\" : [\"ET_L_US\", \"ET_L_Aus_US\", \"ET_L_Central_Africa\"],\n \"3CE\" : [\"CE_US\", \"CE_Aus_US\", \"CE_Central_Africa\"],\n \"1CE-ET\" : [\"CE_US\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \"2CE-ET\" : [\"CE_US\", \"CE_Aus_US\", \"ET_L_Eu\", \"ET_L_Eu_2\"], #named 1 and 2 to distinguish from CE-ET (below) in Mills et al 2018.\n \"CE-ET\" : [\"CE_US\", \"CE_Aus_US\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \"Voyager-ET\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n # next three networks are for calculating the impact of duty cycle on the Voyager-ET network\n \"VoyagerLI-ET\" : [\"LBB_L1\", \"LBB_I1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \"VoyagerHI-ET\" : [\"LBB_H1\", \"LBB_I1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \"VoyagerHL-ET\" : [\"LBB_H1\", \"LBB_L1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \n \"VoyagerETtri\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\", \"ET_Tri_Eu_1\", \"ET_Tri_Eu_2\", \"ET_Tri_Eu_3\"],\n \"Voyager\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\"],\n \"VoyagerWithAL\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\", \"ALV1\", \"ALK1\"],\n \"3_TriangularET\" : [\"ET_L_US\", \"ET_L_Aus_US\", \"ET_L_Central_Africa\",\"ET_L_US_2\", \"ET_L_Aus_US_2\", \"ET_L_Central_Africa_2\"],\n # for comparing to klimenko et al 2011:\n 'LHVA2' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_A-\"],\n 'LHVA' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_A\"],\n 'LHVJ' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_K1\"],\n 'LHVAJ' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_A\",\"LBB_K1\"],\n # for calculating alignment factor distributions in inclincation paper\n \"HL\" : [\"H1\", \"L1\"],\n \"HLV\" : [\"H1\", \"L1\", \"V1\" ],\n \"HLVK\" : [\"L1\",\"H1\",\"V1\",\"K1\"],\n \"HLVKI\" : [\"L1\",\"H1\",\"V1\",\"K1\", \"I1\"],\n \n\n #for optimizing the orientations of ET3L_EU w.r.t. 
polarization metric (see optimizing polarization notebook)\n #first optimize for the two detector network:\n \"ET2L_EU\" : [\"ET_L_Eu\", \"ET_L_Aus_Eu\"],\n \"2ET\" : [\"ET_L_US\", \"ET_L_Aus_US\"],\n #ranges\n }\n return(net_dict_all[configuration])", "def test_get_software(self):\n pass", "def test_get_software_set(self):\n pass", "def evaluate_dep_type_sets():\n strategies = {\n 'defensive': ['agent', 'advcl', 'parataxis'],\n 'aggressive': ['agent', 'advcl', 'parataxis', 'dep', 'aux', 'ccomp', 'xcomp', 'dobj', 'pobj', 'nsubj', 'nsubjpass', 'cc', 'abbrev', 'purpcl', 'predet', 'preconj', 'advmod', 'neg', 'rcmod', 'tmod', 'poss', 'prepc'],\n 'compromise_1': ['agent', 'advcl', 'parataxis', 'aux', 'xcomp', 'pobj', 'nsubjpass', 'cc', 'abbrev', 'purpcl', 'predet', 'neg', 'tmod', 'poss', 'prepc'],\n 'compromise_2': ['agent', 'advcl', 'parataxis', 'aux', 'xcomp', 'pobj', 'nsubjpass', 'cc', 'abbrev', 'purpcl', 'predet', 'neg', 'tmod', 'poss', 'prepc', 'attr', 'csubj', 'csubjpass', 'number', 'possessive', 'punct', 'ref']\n }\n results = {'classification':{}, 'retrieval':{}}\n\n print '------ CLASSIFICATION EVALUATION --------'\n print '> Reading cases..'\n descriptions_path = '../data/tasa/TASA900_dependencies'\n texts, labels = data.read_files(descriptions_path)\n print '> Creating representations..'\n rep = {}\n for strategy in strategies:\n rep[strategy] = []\n metric = graph.GraphMetrics.CLOSENESS\n for i, text in enumerate(texts):\n if i%10==0: print ' ',str(i)+'/'+str(len(texts))\n for strategy in strategies:\n g = graph_representation.construct_dependency_network(text, exclude=strategies[strategy])\n d = graph_representation.graph_to_dict(g, metric)\n rep[strategy].append(d)\n g = None # just to make sure. I don't trust this damn garbage collector...\n for strategy in strategies:\n rep[strategy] = graph_representation.dicts_to_vectors(rep[strategy])\n print '> Evaluating..'\n for strategy in strategies:\n score = evaluation.evaluate_classification(rep[strategy], labels)\n print ' ', strategy, score\n results['classification'][strategy] = score\n\n data.pickle_to_file(results, 'output/dependencies/types_set_eval_tmp')\n\n print '------ RETRIEVAL EVALUATION --------'\n print '> Reading cases..'\n descriptions_path = '../data/air/problem_descriptions_dependencies'\n description_texts, labels = data.read_files(descriptions_path)\n solutions_path = '../data/air/solutions_preprocessed'\n solution_texts, labels = data.read_files(solutions_path)\n solution_vectors = freq_representation.text_to_vector(solution_texts, freq_representation.FrequencyMetrics.TF_IDF)\n print '> Creating representations..'\n rep = {}\n for strategy in strategies:\n rep[strategy] = []\n metric = graph.GraphMetrics.EIGENVECTOR\n for i, text in enumerate(description_texts):\n if i%1==0: print ' ',str(i)+'/'+str(len(description_texts))\n full_graph = graph_representation.construct_dependency_network(text)\n for strategy in strategies:\n g = graph_representation.construct_dependency_network(text, exclude=strategies[strategy])\n d = graph_representation.graph_to_dict(g, metric)\n rep[strategy].append(d)\n g = None # just to make sure..\n full_graph = None\n #~ if i%100==0: data.pickle_to_file(rep, 'output/dependencies/types_eval_rep_'+str(i))\n for strategy in strategies:\n rep[strategy] = graph_representation.dicts_to_vectors(rep[strategy])\n print '> Evaluating..'\n for strategy in strategies:\n score = evaluation.evaluate_retrieval(rep[strategy], solution_vectors)\n print ' ', strategy, score\n results['retrieval'][strategy] 
= score\n\n pp.pprint(results)\n data.pickle_to_file(results, 'output/dependencies/types_set_eval')\n\n return results", "def get_all_tools(project, user_paths, tool_type='synthesis'):\n if tool_type == 'synthesis':\n registry = synthesis_tool_class_registry\n elif tool_type == 'simulation':\n registry = simulation_tool_class_registry\n else:\n log.error(\n 'Invalid tool type specified: {0}'.format(tool_type) +\n ' Use one of [simulation, synthesis]'\n )\n return None\n\n tools = {}\n for toolname, inst_fn in registry.items():\n try:\n inst = inst_fn(project, user_paths)\n if not inst.installed:\n log.warning(\n toolname.capitalize() +\n ' ' + tool_type + ' tool' +\n ' could not be found.' +\n ' Update .chiptoolsconfig or your PATH variable'\n )\n tools[toolname] = inst\n except:\n # Error instancing this tool.\n log.error(\n 'Encountered an error when loading tool wrapper: ' +\n toolname\n )\n log.error(traceback.format_exc())\n return tools" ]
[ "0.5966972", "0.55872685", "0.5425678", "0.53959244", "0.528284", "0.52208155", "0.5148771", "0.5053533", "0.5021349", "0.50063014", "0.49968717", "0.49807563", "0.49731576", "0.4966501", "0.49594882", "0.49388883", "0.49147922", "0.4914047", "0.49095652", "0.4860251", "0.48527294", "0.48527294", "0.48432553", "0.483942", "0.48282656", "0.48251945", "0.47368824", "0.4713036", "0.47116908", "0.471122" ]
0.8044187
0
Prepares letmehear for audio processing. `source_path` Absolute or relative to the current directory path, containing audio file(s) or subdirectories with audio file(s) to process. `dest_path` Absolute or relative to the current directory path to store output files in. If None, output files are saved in `letmehear` directory in the same directory as input file(s). `use_logging` Defines the verbosity level of letmehear. All messages produced by the application are logged with the `logging` module.
def __init__(self, source_path, dest_path=None, use_logging=logging.INFO): self.path_source = os.path.abspath(source_path) self.path_target = dest_path if use_logging: self._configure_logging(use_logging) logging.info('Source path: %s', self.path_source) if not os.path.exists(self.path_source): raise LetMeError('Path "%s" is not found.' % self.path_source) if dest_path is not None: self.path_target = os.path.abspath(dest_path) os.chdir(self.path_source)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_audio(a_name, target=False):\n samprate = 16000 # Sampling Rate\n length = 16 # Amount of blocks for 1 walkthrough\n overlap = 8 # Step between samples in amount of blocks\n fft = 1024 # Length of block (64ms)\n\n # Upload and preparing data sets\n # audio_path = \"raw_data_wav/\"\n # full_a_name = audio_path + a_name\n print('loading %s' % a_name)\n audio, _ = lr.load(a_name, sr=samprate)\n audio = filter_audio(audio) # Removing silence and spaces between words\n data = lr.stft(audio, n_fft=fft).swapaxes(0, 1) # Export spectrogram\n samples = []\n\n for i in range(0, len(data) - length, overlap):\n samples.append(np.abs(data[i:i + length])) # Create training sample\n\n results_shape = (len(samples), 1)\n results = np.ones(results_shape) if target else np.zeros(results_shape)\n\n return np.array(samples), results", "def setup(self):\n if (((not os.path.isfile(self.tempfile))\n or (os.stat(self.tempfile).st_size == 0))):\n self.write_ply(self.tempfile)\n skip_start = False\n if self.inst_kwargs.get('src_type', 1) == 0:\n skip_start = True\n super(TestCisPlyInput, self).setup(skip_start=skip_start)", "def set_audio_output_source(self) -> None:\n # noinspection PyUnresolvedReferences\n self.media.set_audio_output(self.audio_output_source[0])", "def prepare_for_training(self, target, sample_rate=16000, nested=False):\n if nested:\n af_target_file = os.path.join(\n target, \"sph\", basename(self.audio_file.location)\n )\n tf_target_file = os.path.join(\n target, \"stm\", basename(self.transcript_file.location)\n )\n else:\n af_target_file = os.path.join(target, basename(self.audio_file.location))\n tf_target_file = os.path.join(\n target, basename(self.transcript_file.location)\n )\n\n af = self.audio_file.prepare_for_training(\n af_target_file,\n sample_rate=sample_rate,\n )\n\n tf = self.transcript_file.write(tf_target_file)\n\n return (\n exemplar({\"audio_file\": af, \"transcript_file\": tf})\n if all([af, tf])\n else None\n )", "def sox_chop_source_audio(self, source_filename, part_length, backshift=0):\n logging.info('Preparing for source file chopping ...')\n\n wav_length = self.sox_get_audio_length(source_filename)\n if wav_length <= part_length:\n parts_count = 1\n else:\n # Calculate audio length with one second back shift. 
Also known as possum formula %)\n parts_count = int(round(wav_length / float(part_length - backshift), 0))\n parts_count_len = len(str(parts_count))\n\n logging.info('Chopping information:\\n'\n ' Source file length: %(source)s second(s)\\n'\n ' Requested part length: %(part)s second(s)\\n'\n ' Backshift: %(back)s second(s)\\n'\n ' Parts count: %(parts_cnt)s',\n source=wav_length,\n part=part_length,\n parts_cnt=parts_count,\n back=backshift)\n\n logging.info('Starting chopping ...')\n for index in range(0, parts_count):\n start_pos = index * part_length\n if start_pos > 0:\n # We need to shift all but the first part for `backshift` seconds backward\n # to not to loose some phrases on chopping.\n start_pos -= (index * backshift)\n part_number = str(index + 1).rjust(parts_count_len, '0')\n\n # This will strip ID3Tags from file - they are identical thus uninformative.\n comment = '--comment \"\"'\n\n target = part_number\n logging.info(\n 'Working on %s.mp3 [%s/%s - %s%%] ...',\n target,\n int(part_number),\n parts_count,\n int(int(part_number) * 100 / parts_count)\n )\n command = 'sox -V1 \"%(source)s\" %(comment)s %(target)s.mp3 trim %(start_pos)s %(length)s' % {\n 'source': source_filename,\n 'target': target,\n 'start_pos': start_pos,\n 'length': part_length,\n 'comment': comment\n }\n self._process_command(command, PIPE)\n logging.info('Chopped.\\n')", "def preprocess_wav(fpath_or_wav: Union[str, Path, np.ndarray],\n source_sr: Optional[int] = None):\n # Load the wav from disk if needed\n if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path):\n wav, source_sr = librosa.load(fpath_or_wav, sr=None)\n else:\n wav = fpath_or_wav\n\n # Resample the wav if needed\n if source_sr is not None and source_sr != sampling_rate:\n wav = librosa.resample(wav, source_sr, sampling_rate)\n\n # Apply the preprocessing: normalize volume and shorten long silences\n wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True)\n\n return wav", "def load_audio(path, target_fs=None):\n y, fs = sf.read(path)\n if y.ndim>1:\n y = np.mean(y, axis=1)\n if target_fs is not None and fs!=target_fs:\n #print('Resampling %d->%d...' 
%(fs, target_fs))\n y = librosa.resample(y, orig_sr=fs, target_sr=target_fs)\n fs = target_fs\n return y, fs", "def __init__(self, path: str, verbose: bool = False):\n self.path = path\n self.output_path = os.path.join(\n os.path.dirname(self.path), os.pardir, \"source_data\",\n \"original_tweets_with_lemmas.p\"\n )\n self.verbose = verbose", "def pack_audio_files_to_hdf5(args):\n\n # Arguments & parameters\n dataset_dir = args.dataset_dir\n workspace = args.workspace\n data_type = args.data_type\n mini_data = args.mini_data\n\n sample_rate = config.sample_rate\n audio_length = config.audio_length\n classes_num = config.classes_num\n lb_to_idx = config.lb_to_idx\n frames_per_second = config.frames_per_second\n frames_num = frames_per_second * config.audio_duration\n\n has_strong_target = data_type in ['testing', 'evaluation']\n\n # Paths\n audios_dir = os.path.join(dataset_dir, data_type)\n weak_label_csv_path = os.path.join(dataset_dir, 'metadata', \n get_weak_csv_filename(data_type))\n\n if data_type == 'testing':\n strong_label_csv_path = os.path.join(dataset_dir, 'metadata', \n 'groundtruth_strong_label_testing_set.csv')\n elif data_type == 'evaluation':\n strong_label_csv_path = os.path.join(dataset_dir, 'metadata', \n 'groundtruth_strong_label_evaluation_set.csv')\n\n if mini_data:\n packed_hdf5_path = os.path.join(workspace, 'features', \n 'minidata_{}.waveform.h5'.format(data_type))\n else:\n packed_hdf5_path = os.path.join(workspace, 'features', \n '{}.waveform.h5'.format(data_type))\n create_folder(os.path.dirname(packed_hdf5_path))\n\n # Read metadata\n weak_meta_list = read_weak_csv(weak_label_csv_path, data_type)\n\n # Use a small amount of data for debugging\n if mini_data:\n random.seed(1234)\n random.shuffle(weak_meta_list)\n weak_meta_list = weak_meta_list[0 : 100]\n\n audios_num = len(weak_meta_list)\n\n feature_time = time.time()\n with h5py.File(packed_hdf5_path, 'w') as hf:\n hf.create_dataset(\n name='audio_name', \n shape=(audios_num,), \n dtype='S80')\n\n hf.create_dataset(\n name='waveform', \n shape=(audios_num, audio_length), \n dtype=np.int32)\n\n hf.create_dataset(\n name='weak_target', \n shape=(audios_num, classes_num), \n dtype=np.float32)\n\n if has_strong_target:\n strong_meta_dict = read_strong_csv(strong_label_csv_path) \n \n hf.create_dataset(\n name='strong_target', \n shape=(0, frames_num, classes_num), \n maxshape=(None, frames_num, classes_num), \n dtype=np.bool)\n\n for n in range(audios_num):\n print(n)\n weak_meta_dict = weak_meta_list[n]\n audio_name = weak_meta_dict['audio_name']\n audio_path = os.path.join(audios_dir, audio_name)\n (audio, fs) = librosa.core.load(audio_path, sr=sample_rate, mono=True)\n audio = pad_truncate_sequence(audio, audio_length)\n\n hf['audio_name'][n] = audio_name.encode()\n hf['waveform'][n] = float32_to_int16(audio)\n hf['weak_target'][n] = weak_target = get_weak_target(\n weak_meta_dict['labels'], lb_to_idx)\n\n if has_strong_target:\n strong_target = get_strong_target(\n weak_meta_dict['audio_name'][1:], strong_meta_dict, \n frames_num, frames_per_second, lb_to_idx)\n \n hf['strong_target'].resize((n + 1, frames_num, classes_num))\n hf['strong_target'][n] = strong_target\n\n print('Write hdf5 to {}'.format(packed_hdf5_path))\n print('Time: {:.3f} s'.format(time.time() - feature_time))", "def test_prepare_l8_l1_tarball_with_source(\n tmp_path: Path, l1_ls8_folder: Path, ls8_telemetry_path, l1_ls8_ga_expected: Dict\n):\n assert l1_ls8_folder.exists(), \"Test data missing(?)\"\n\n output_path = tmp_path\n 
expected_metadata_path = (\n output_path\n / \"090\"\n / \"084\"\n / \"LC08_L1TP_090084_20160121_20170405_01_T1.odc-metadata.yaml\"\n )\n check_prepare_outputs(\n invoke_script=landsat_l1_prepare.main,\n run_args=[\n \"--output-base\",\n output_path,\n \"--producer\",\n \"ga.gov.au\",\n \"--source\",\n ls8_telemetry_path,\n l1_ls8_folder,\n ],\n expected_doc=l1_ls8_ga_expected,\n expected_metadata_path=expected_metadata_path,\n )", "def prepare_destination(self):\n self.movie_root_path = self.config.share_movie_root_path % (\n self.share_path, self.title)\n\n if os.path.isdir(self.movie_root_path):\n if self.capacity_reached():\n Logger.log(\n '[!] Capacity reached. Skipping adding movie %s.' % self.title)\n else:\n if not os.path.isdir(self.movie_root_path):\n Logger.log('[+] Adding Movie: %s' % self.title)\n os.mkdir(self.movie_root_path)", "def __init__(self, language, datasource, source_file, source_path,\n target_path, pipeline_config, shuffle_file):\n self.language = language\n self.datasource = datasource\n self.source_file = source_file\n self.source_path = source_path\n self.target_path = target_path\n self.location = target_path\n self.pipeline_config = pipeline_config\n self.shuffle_file = shuffle_file\n self.data_path = os.path.join(self.target_path, 'data')\n self.conf_path = os.path.join(self.target_path, 'config')\n self.file_list = os.path.join(self.conf_path, FNAME_FILELIST)\n if self.source_file is not None or self.source_path is not None:\n self._initialize_directory()", "def setup(outpath):\n time = datetime.now().strftime(\"%d_%m_%Y_%H_%M_%S\")\n temp = os.path.join(outpath, \"data\", \"temp\")\n result = os.path.join(outpath, \"results\")\n logs = os.path.join(outpath, \"logs\")\n download = os.path.join(outpath, \"data\", \"download\")\n chromsizes = os.path.join(outpath,\n \"data\", \"chromsizes\")\n if not os.path.exists(download):\n os.makedirs(download)\n if not os.path.exists(temp):\n os.makedirs(temp)\n if not os.path.exists(result):\n os.makedirs(result)\n if not os.path.exists(logs):\n os.makedirs(logs)\n if not os.path.exists(chromsizes):\n os.makedirs(chromsizes)\n\n logname = time + \"_tfanalyzer.log\"\n logfile = os.path.join(logs, logname)\n logging.basicConfig(filename=logfile, level=logging.INFO)\n return logfile", "def lemon_bidscoin_prepare(src_path):\n lemon_prepare()\n this_dir = os.path.dirname(__file__)\n data_dir = os.path.join(this_dir,'..','_data')\n root_path = os.path.abspath(os.path.join(data_dir,'lemon'))\n bidscoin_input_path = src_path\n\n os.makedirs(bidscoin_input_path,exist_ok=True)\n\n files = _get_files(root_path)\n files = [x for x in files if x.split('.')[-1] in ['eeg','vmrk','vhdr'] ]\n\n files_out = []\n for f in files:\n session = 'ses-001'\n task = 'resting'\n head,tail=os.path.split(f)\n sub = tail.split('.')[0]\n new_path = os.path.join(bidscoin_input_path,sub,session,task,tail)\n files_out.append(new_path)\n\n for old,new in zip(files,files_out):\n print(old,' to ',new)\n os.makedirs(os.path.split(new)[0], exist_ok=True)\n if not os.path.isfile(new):\n shutil.copy2(old,new)\n else:\n print('already done, skipping...')\n print('finish')", "def _prepare_raw_data(kwargs):\n path = kwargs.get(\"path\", None)\n output_path = kwargs.get(\"output_path\", None)\n data_source = DataSource.best_available_data_source()\n for job in data_source.jobs(\n source=\"raw\", path=path, data_path=output_path, stateful=False):\n data_source.write_job(data=job, path=output_path)\n for traffic in data_source.traffics(\n source=\"raw\", path=path, 
data_path=output_path, stateful=False):\n data_source.write_traffic(data=traffic, path=output_path)", "def pre_process_source(source, sourcemag, sourcepb, sourcez, smooth=True):\n inspec = None\n inspecz = np.nan\n inspecmag = np.nan\n inspecpb = None\n\n source_table_file = os.path.join('sources', 'sourcetable.txt')\n source_table_file = io.get_pkgfile(source_table_file)\n source_table = at.Table.read(source_table_file, format='ascii')\n ind = (source_table['specname'] == source)\n nmatch = len(source_table['specname'][ind])\n if nmatch == 1:\n # load the file and the info\n inspec = source_table['specname'][ind][0]\n inspecz = source_table['redshift'][ind][0]\n inspecmag = source_table['g'][ind][0] # for now, just normalize the g-band mag\n elif nmatch == 0:\n message = 'Spectrum {} not listed in lookup table'.format(source)\n pass\n else:\n message = 'Spectrum {} not uniquely listed in lookup table'.format(source)\n pass\n\n if inspec is None:\n warnings.warn(message, RuntimeWarning)\n inspec = source\n inspecz = sourcez\n inspecmag = sourcemag\n inspecpb = sourcepb\n\n if not os.path.exists(inspec):\n message = 'Spectrum {} could not be found'.format(inspec)\n raise ValueError(message)\n\n try:\n spec = at.Table.read(inspec, names=('wave','flux'), format='ascii')\n except Exception as e:\n message = 'Could not read file {}'.format(source)\n raise ValueError(message)\n\n if hasattr(inspecpb,'wave') and hasattr(inspecpb, 'throughput'):\n pass\n else:\n pbs = passband.load_pbs([inspecpb], 0.)\n try:\n inspecpb = pbs[inspecpb][0]\n except KeyError as e:\n message = 'Could not load passband {}'.format(inspecpb)\n raise RuntimeError(message)\n\n try:\n inspecmag = float(inspecmag)\n except (TypeError, ValueError) as e:\n message = 'Source magnitude {} could not be interpreted as a float'.format(inspecmag)\n raise ValueError(message)\n\n try:\n inspecz = float(inspecz)\n except (TypeError, ValueError) as e:\n message = 'Source redshift {} could not be interpreted as a float'.format(inspecz)\n raise ValueError(message)\n\n if inspecz < 0 :\n message = 'Source must have positive definite cosmological redshift'\n raise ValueError(message)\n\n inspec = S.ArraySpectrum(spec['wave'], spec['flux'], fluxunits='flam')\n try:\n inspec = inspec.renorm(sourcemag, 'ABmag', inspecpb)\n inspec.convert('flam')\n except Exception as e:\n message = 'Could not renormalize spectrum {}'.format(inspec)\n raise RuntimeError(message)\n\n if inspecz > 0:\n zblue = 1./(1+inspecz) - 1.\n inspec_rest = inspec.redshift(zblue)\n inspec_rest.convert('flam')\n c = default_cosmology.get()\n mu = c.distmod(inspecz)\n out = inspec_rest*(10.**(0.4*mu.value))\n else:\n out = inspec\n # TODO renorm is basic and just calculates dmag = RNval - what the original spectrum's mag is\n # and renormalizes - there's some sanity checking for overlaps\n # we can do this without using it and relying on the .passband routines\n return out", "def prepare_for_training(self, target=None, nested=False, sample_rate=16000):\n\n # write corpus back in place if no target\n target = self.location if target is None else target\n\n executor = ThreadPoolExecutor()\n\n # process audio files concurrently for speed\n futures = [\n executor.submit(\n partial(\n _.prepare_for_training,\n target=target,\n sample_rate=sample_rate,\n nested=nested,\n )\n )\n for _ in self.exemplars\n ]\n\n # trigger conversion and gather results\n new_exemplars = [future.result() for future in tqdm(futures)]\n\n new_corpus = corpus(\n {\n \"location\": target,\n \"exemplars\": 
[eg for eg in new_exemplars if eg is not None],\n }\n )\n new_corpus.validate()\n return new_corpus.log()", "def distribute(self, source, dest, volume, allow_carryover=False,\n mix_before=False, mix_vol=None, repetitions=10,\n flowrate=\"100:microliter/second\"):\n opts = {}\n dists = self.fill_wells(dest, source, volume)\n groups = []\n for d in dists:\n opts = {}\n if mix_before:\n if not mix_vol:\n raise RuntimeError(\"No mix volume specified for \"\n \"mix_before\")\n opts[\"mix_before\"] = {\n \"volume\": mix_vol,\n \"repetitions\": repetitions,\n \"speed\": flowrate\n }\n if allow_carryover:\n opts[\"allow_carryover\"] = allow_carryover\n opts[\"from\"] = d[\"from\"]\n opts[\"to\"] = d[\"to\"]\n groups.append(\n {\"distribute\": opts}\n )\n\n self.pipette(groups)", "def hear(self, recursive=False):\n if self.path_target is not None and not os.path.exists(self.path_target):\n self._create_target_path(self.path_target)\n\n files_dict = self.filter_target_extensions(self.get_dir_files(recursive))\n\n paths = sorted(files_dict.keys())\n for path in paths:\n logging.info('%s\\n Working on: %s\\n', '====' * 10, path)\n\n if self.path_target is None:\n # When a target path is not specified, create `letmehear` subdirectory\n # in every directory we are working at.\n target_path = os.path.join(path, 'letmehear')\n else:\n # When a target path is specified, we create a subdirectory there\n # named after the directory we are working on.\n target_path = os.path.join(self.path_target, os.path.split(path)[1])\n\n self._create_target_path(target_path)\n logging.info('Target (output) path: %s', target_path)\n\n source_filename = os.path.join(target_path, self._source_filename)\n self.process_source_file(path, files_dict[path], source_filename)\n\n logging.info('We are done now. 
Thank you.\\n')", "def remix(self):\n self.log(\"Looking up track...\", 5)\n self.getTag()\n self.processArt()\n\n self.log(\"Listening to %s...\" % ('\"%s\"' % self.tag['title'] if 'title' in self.tag else 'song'), 5)\n self.original = audio.LocalAudioFile(self.infile, False)\n if not 'title' in self.tag:\n self.detectSong(self.original)\n self.st = FastModify()\n \n self.log(\"Choosing key and tempo...\", 10)\n self.tonic = self.original.analysis.key['value']\n self.tempo = self.original.analysis.tempo['value']\n self.bars = self.original.analysis.bars\n self.beats = self.original.analysis.beats\n self.sections = self.original.analysis.sections\n self.tag['key'] = self.keys[self.tonic] if self.tonic >= 0 and self.tonic < 12 else '?'\n self.tag['tempo'] = self.template['tempo']\n\n self.log(\"Arranging intro...\", 40.0/(len(self.sections) + 1))\n self.partialEncode(self.compileIntro())\n\n past_progress = 0\n hats = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n\n i = 0 # Required if there are no sections\n for i, section in enumerate(self.sections):\n self.log(\"Arranging section %s of %s...\" % (i+1, len(self.sections)), 40.0/(len(self.sections) + 1))\n a, b = self.compileSection(i, section, hats)\n self.partialEncode(a)\n self.partialEncode(b)\n del a, b\n del hats\n self.original.unload()\n\n self.log(\"Adding ending...\", 5)\n self.partialEncode(\n audio.AudioData(\n self.sample_path + self.template['splash_ends'][(i + 1) % len(self.template['splash_ends'])],\n sampleRate=44100,\n numChannels=2,\n verbose=False\n )\n )\n \n self.log(\"Mixing...\", 5)\n self.mixwav(self.tempfile)\n\n if self.deleteOriginal:\n try:\n unlink(self.infile)\n except:\n pass # File could have been deleted by an eager cleanup script\n\n self.log(\"Mastering...\", 5)\n self.lame(self.tempfile, self.outfile)\n unlink(self.tempfile)\n \n self.log(\"Adding artwork...\", 20)\n self.updateTags(titleSuffix = \" (Wub Machine Remix)\")\n \n return self.outfile", "def write_sources(par, hdr, path='.', suffix=''):\n file = findpath('sesiflows.seistools') + '/' + 'specfem2d/SOURCE'\n with open(file, 'r') as f:\n lines = f.readlines()\n\n file = path + '/' + 'DATA/SOURCE' + suffix\n _writelines(file, lines)\n\n # adjust source coordinates\n setpar('xs', hdr.sx[0], file)\n setpar('zs', hdr.sy[0], file)\n setpar('ts', hdr.ts, file)\n\n # adjust source amplitude\n try:\n fs = float(getpar('factor', file))\n setpar('factor', str(fs*hdr.fs), file)\n except:\n pass\n\n # adjust source wavelet\n if 1:\n # Ricker wavelet\n setpar('time_function_type', 1, file)\n elif 0:\n # first derivative of Gaussian\n setpar('time_function_type', 2, file)\n elif 0:\n # Gaussian\n setpar('time_function_type', 3, file)\n elif 0:\n # Dirac\n setpar('time_function_type', 4, file)\n elif 0:\n # Heaviside\n setpar('time_function_type', 5, file)\n\n setpar('f0', par['F0'], file)", "def preprocess(args):\n \n # Set up options\n src = args.src\n dest = args.dest\n collect_path = args.collect_path\n formats = args.formats\n ref_img_path = args.ref_img_path\n width = args.width\n debug = args.debug\n if debug:\n print args.__dict__\n # Make necessary directories if there is not.\n if not os.path.exists(dest):\n os.mkdir(dest)\n if not os.path.exists(collect_path):\n os.mkdir(collect_path)\n\n # Open referce image and trying to find the face in it.\n try:\n ref_img_origin = Image.open(os.path.abspath(ref_img_path))\n except IOError as e:\n print \"[IOError] Can't open the reference imgae: 
{}\".format(ref_img_path)\n print \"[Info] Terminating....\"\n return 1\n\n face_ref_coor, degree_ref = segment_tools.faces_positions(ref_img_origin)\n \n # Only one face is allowed in referece image. Raise error if it isn't.\n # Crop the origin image to get the face image.\n if face_ref_coor.shape[0] > 1:\n raise MultiFaceError(\"Detect multiple faces in reference image. There should be only one face.\")\n face_ref = segment_tools.crop_img(ref_img_origin, face_ref_coor[0], offset = True)\n\n # Adjust that image to make eyes lie on horizontal line.\n try:\n eye_angle = face_align_tools.eyes_horizon_angle(face_ref)\n except segment_tools.NotDetectedError:\n print \"[NotDetectedError] This reference image is not good enough. The program can't make the eyes horizontal.\"\n print \"[NotDetectedError] Pleas use another reference image.\"\n print \"Terminating....\"\n return 1\n\n total_degree = eye_angle + degree_ref\n img_ref_rotated = ref_img_origin.rotate(total_degree, resample = Image.CUBIC)\n face_ref_coor, _ = segment_tools.faces_positions(img_ref_rotated)\n face_ref = segment_tools.crop_img(img_ref_rotated, face_ref_coor[0], offset = True)\n \n # Resize the reference face to desired witdh (but preserve the width/heigh ratio.)\n ref_width, ref_heigh = face_ref.size\n face_ref = face_ref.resize((width, ref_heigh*width/ref_width))\n if debug:\n face_ref.show()\n \n ref_file_name = os.path.basename(ref_img_path)\n face_ref.save(os.path.join(os.path.abspath(dest), \"ref_\" + ref_file_name))\n print \"[Info] Complete preprocess of reference image.\"\n\n # Walk through the source directory.\n print \"[Info] Start processing files in {src}.\".format(src = os.path.abspath(src))\n for rel_path, dir_names, file_names in os.walk(os.path.abspath(src)):\n for filename in file_names:\n if np.any(map(filename.endswith, formats)):\n file_path = os.path.join(os.path.abspath(rel_path), filename)\n print \"[Info] Start processing {file_path}.\".format(file_path = file_path)\n try:\n target_img_origin = Image.open(file_path)\n except IOError as e:\n print \"[IOError] Can not open {}\".format(file_path)\n print \"[Info] Passing this image.\"\n continue\n \n # Try to find faces in target image. If don't, copy it to collection directory.\n try:\n faces_target_coors, degree_target = segment_tools.faces_positions(target_img_origin)\n except segment_tools.NotDetectedError as e:\n print \"[NotDetectedError] Does not find any face in {filename}. 
Collect it into {collect_path}\".format(filename = filename, collect_path = collect_path)\n target_img_origin.save(os.path.join(os.path.abspath(collect_path), filename))\n continue # Brake loop for not finding any face in the picture.\n\n # Adjust all found faces to make them just.\n target_img_rotated = target_img_origin.rotate(degree_target, resample = Image.CUBIC)\n for face_coor in faces_target_coors:\n temp_img = segment_tools.crop_img(target_img_rotated, face_coor, offset=True)\n try:\n eyes_degree = face_align_tools.eyes_horizon_angle(temp_img)\n except segment_tools.NotDetectedError:\n eyes_degree = 0\n face_target = temp_img.rotate(eyes_degree)\n temp_file_name = random_prefix() + filename\n if debug:\n face_target.show()\n face_target.save(os.path.join(os.path.abspath(dest), temp_file_name))\n temp_aligned_file_name = \"aligned_\" + temp_file_name\n try:\n face_target_aligned = face_align_tools.face_align(face_ref, face_target)\n face_target_aligned.save(os.path.join(os.path.abspath(dest), temp_aligned_file_name))\n except segment_tools.NotDetectedError:\n print \"[AlignError] Can't align face. Moving to {collection}.\".format(collection = collect_path)\n face_target.save(os.path.join(os.path.abspath(collect_path), \"not_aligned_\" + temp_file_name))\n print \"[Info] Saving {}\".format(os.path.join(os.path.abspath(collect_path), \"not_aligned_\" + temp_file_name))\n continue\n masked_target_img = segment_tools.mask_img(target_img_rotated, faces_target_coors)\n\n if debug:\n masked_target_img.show()\n masked_target_img.save(\"masked.jpg\")\n \n try:\n while True:\n temp_face_coors, temp_degree = segment_tools.faces_positions(masked_target_img)\n temp_img = masked_target_img.rotate(temp_degree, resample = Image.CUBIC)\n if debug:\n print \"temp_face_coors\", temp_face_coors\n print \"[Info] Multiple faces are found in {file_path}\".format(file_path = file_path)\n for face_coor in temp_face_coors:\n temp_face = segment_tools.crop_img(temp_img, face_coor, offset = True)\n eye_angle = face_align_tools.eyes_horizon_angle(temp_face)\n face_target = temp_face.rotate(eye_angle, resample = Image.CUBIC)\n if debug:\n face_target.show()\n face_target_aligned = face_align_tools.face_align(face_ref, face_target)\n temp_file_name = random_prefix() + filename\n temp_aligned_file_name = \"aligned_\" + temp_file_name\n print \"[Info] Sucessful aligned {}\".format(temp_file_name)\n if debug:\n masked_target_img.show()\n except segment_tools.NotDetectedError:\n file_path = os.path.join(os.path.abspath(rel_path), filename)\n print \"[Info] Complete searching faces in {file_path}\".format(file_path = file_path)", "def load_jam_audio(\n jam_in, audio_file, validate=True, strict=True, fmt=\"auto\", **kwargs\n):\n\n if isinstance(jam_in, jams.JAMS):\n jam = jam_in\n elif jam_in is None:\n jam = jams.JAMS()\n else:\n jam = jams.load(jam_in, validate=validate, strict=strict, fmt=fmt)\n\n y, sr = librosa.load(audio_file, **kwargs)\n\n if jam.file_metadata.duration is None:\n jam.file_metadata.duration = librosa.get_duration(y=y, sr=sr)\n\n return jam_pack(jam, _audio=dict(y=y, sr=sr))", "def addSource(self,\n path,\n name,\n location,\n copyLib=False,\n copyGroups=False,\n copyInfo=False,\n copyFeatures=False,\n muteKerning=False,\n muteInfo=False,\n mutedGlyphNames=None,\n familyName=None,\n styleName=None,\n ):\n sourceElement = ET.Element(\"source\")\n sourceElement.attrib['filename'] = self._posixPathRelativeToDocument(path)\n sourceElement.attrib['name'] = name\n if copyLib:\n libElement = 
ET.Element('lib')\n libElement.attrib['copy'] = \"1\"\n sourceElement.append(libElement)\n\n if copyGroups:\n groupsElement = ET.Element('groups')\n groupsElement.attrib['copy'] = \"1\"\n sourceElement.append(groupsElement)\n\n if copyFeatures:\n featuresElement = ET.Element('features')\n featuresElement.attrib['copy'] = \"1\"\n sourceElement.append(featuresElement)\n\n if copyInfo or muteInfo:\n # copy info:\n infoElement = ET.Element('info')\n if copyInfo:\n infoElement.attrib['copy'] = \"1\"\n if muteInfo:\n infoElement.attrib['mute'] = \"1\"\n sourceElement.append(infoElement)\n\n if muteKerning:\n # add kerning element to the source\n kerningElement = ET.Element(\"kerning\")\n kerningElement.attrib[\"mute\"] = '1'\n sourceElement.append(kerningElement)\n\n if mutedGlyphNames:\n # add muted glyphnames to the source\n for name in mutedGlyphNames:\n glyphElement = ET.Element(\"glyph\")\n glyphElement.attrib[\"name\"] = name\n glyphElement.attrib[\"mute\"] = '1'\n sourceElement.append(glyphElement)\n\n if familyName is not None:\n sourceElement.attrib['familyname'] = familyName\n if styleName is not None:\n sourceElement.attrib['stylename'] = styleName\n\n\n locationElement = self._makeLocationElement(location)\n sourceElement.append(locationElement)\n self.root.findall('.sources')[0].append(sourceElement)", "def create_source(self, source):\n if not os.path.isdir(source):\n os.makedirs(source)\n # Create a text file in the source directory.\n text_file = os.path.join(source, 'notes.txt')\n with open(text_file, 'w') as handle:\n handle.write(\"This file should be included in the backup.\\n\")\n # Create a subdirectory in the source directory.\n subdirectory = os.path.join(source, 'subdirectory')\n os.mkdir(subdirectory)\n # Create a symbolic link in the subdirectory.\n symlink = os.path.join(subdirectory, 'symbolic-link')\n os.symlink('../include-me.txt', symlink)", "def convert(self):\n #lame --mp3input --silent -h -b BITRATE SOURCE TARGET\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n msg('command', command)\n error = check_call(command)\n if error != 0:\n raise TaskError(subprocess.CalledProcessError)\n self.success = True", "def setup(zip_path, dest_path):\n\n #makes folder for zip files\n make_directory(zip_path)\n\n #makes folder for processed data\n make_directory(dest_path)", "def amsg_source_make(*args, **kwargs):\n return _uhd_swig.amsg_source_make(*args, **kwargs)", "def copy_source_files(self):\n\n LOGGER.info(f'start copying source files')\n count = 0\n for sfp in tqdm(sorted(self.source_fps), disable=self.disable_tqdm):\n try:\n meta = extract_law_meta(sfp)\n nodes = parse_xml_fp(sfp)\n tfp = self.stot(sfp)\n tfp.parent.mkdir(parents=True, exist_ok=True)\n save_law_tree(meta['LawTitle'], nodes, tfp)\n except Exception as e:\n LOGGER.error(f'failed to copy {sfp}: {e}')\n continue\n self.target_fps.add(tfp)\n LOGGER.debug(f'copied {sfp} to {tfp}')\n count += 1\n LOGGER.info(f'copied total {count} source files, now total {len(self.target_fps)} target files exist')", "def setup(self, newdir=None):\n if not os.path.exists(self.output_path):\n os.makedirs(self.output_path)\n if newdir:\n _new = os.path.join(self.output_path, newdir)\n if not os.path.exists(_new):\n os.makedirs(_new)" ]
[ "0.55177593", "0.49116963", "0.47969633", "0.4784276", "0.4767295", "0.4715013", "0.45680743", "0.45591968", "0.45439425", "0.44847107", "0.4469214", "0.44230482", "0.43916404", "0.4383606", "0.4373705", "0.43536156", "0.43401626", "0.4333249", "0.43224534", "0.43181127", "0.4298544", "0.4296162", "0.4295173", "0.4293794", "0.42929357", "0.42870632", "0.42649668", "0.42445007", "0.42378172", "0.4226548" ]
0.54969025
1
Sets letmehear into dry run mode, in which all requested actions are only simulated and no changes are written to the filesystem.
def set_dry_run(self): self._dry_run = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dry_run(self, dry_run):\n\n self._dry_run = dry_run", "def dry(self):\n self._dry = True\n return self", "def do_dry_run(self):\n self.loggit.info('DRY-RUN MODE. No changes will be made.')\n msg = f'DRY-RUN: Update cluster routing transient settings: {self.settings}'\n self.loggit.info(msg)", "def _doDryGripper(self):\n self._cmdDry(2)", "def server_dry_run(self, server_dry_run):\n\n self._server_dry_run = server_dry_run", "def is_dry_run(self):\n # Set this value to true if you want the entire operation to run, but not the ingestion.\n return os.environ.get('SNYK_DRY_RUN', 'false').lower() in ('1', 'yes', 'true')", "def dry_cargo(self, dry_cargo):\n\n self._dry_cargo = dry_cargo", "def can_dry_run(self):\r\n return False", "def dry_run(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"dry_run\")", "def dry_run(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"dry_run\")", "def dry_run(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"dry_run\")", "def testDryRun(self):\n\n\t\tself.testTooLong(dry_run=True)", "def is_dry_run():\n return 'DRY_RUN' in os.environ and os.environ['DRY_RUN']=='true'", "def is_dry_run(self):\n try:\n v = environment.get(\"Run\")\n return v.lower() == \"dry\"\n except KeyError:\n return False", "def test_main_dryrun():\n parser = r.buildparser()\n args = parser.parse_args(['--dryrun'])\n r.main(args)", "def _simulate(self, action=None):\n for k in range(int(self.SIMULATION_FREQUENCY // self.config[\"policy_frequency\"])):\n if action is not None and \\\n self.time % int(self.SIMULATION_FREQUENCY // self.config[\"policy_frequency\"]) == 0:\n # Forward action to the spacecraft\n self.spacecraft.act(self.ACTIONS[action])\n\n self.space.act()\n self.space.step(1 / self.SIMULATION_FREQUENCY)\n self.time += 1\n\n # Automatically render intermediate simulation steps if a viewer has been launched\n # Ignored if the rendering is done offscreen\n self._automatic_rendering()\n\n # Stop at terminal states\n if self.done or self._is_terminal():\n break\n self.enable_auto_render = False", "def test_dry_run():\n config = get_config(\"delete.conf\")\n path = get_config_path(config)\n test_file = make_test_file(path)\n\n console.pushbroom(config, dry_run=True)\n assert test_file.exists()\n\n console.pushbroom(config)\n assert not test_file.exists()\n\n path.rmdir()", "def do_craft(self, **kwargs):\n return None", "def force_option(args, run):\n run.force = True", "def can_dry_run(self, task: \"TaskView\") -> bool:\n return False", "def cmd_runner(self):\n if self.dryrun:\n return dryrun\n else:\n return None", "def set_automatic(self, mode):\n self.slam.controlled = not mode\n if mode:\n self.slam.resume()", "def set_defaults(self):\n self.plastic = False\n self.unset_output()\n self.reward = False\n self.patmod = config.impact_modulation_default", "def _run(self):\n logging.warning('-> perform EMPTY experiment...')", "def DisableByRunIf(self):\n self.run_if = 'False'", "def normal(self):\n self.run_command('normal')", "def setrestricted(miner: Miner, login, allowsetting):\n commands = get_changeconfigcommands(getminerfilename(miner), 'api-allow', allowsetting)\n sendcommands_and_restart(miner, login, commands)", "def _simulate(self, action: Optional[Action] = None) -> None:\n frames = int(self.config[\"simulation_frequency\"] // self.config[\"policy_frequency\"])\n for frame in range(frames):\n # Forward action to the vehicle\n if action is not None \\\n and not self.config[\"manual_control\"] \\\n and self.steps % 
int(self.config[\"simulation_frequency\"] // self.config[\"policy_frequency\"]) == 0:\n self.action_type.act(action)\n\n self.road.act()\n self.road.step(1 / self.config[\"simulation_frequency\"])\n self.steps += 1\n\n # Automatically render intermediate simulation steps if a viewer has been launched\n # Ignored if the rendering is done offscreen\n if frame < frames - 1: # Last frame will be rendered through env.render() as usual\n self._automatic_rendering()\n\n self.enable_auto_render = False", "def run(self, dry=False):\n cache_dir = ub.ensure_app_cache_dir('sprokit', 'temp_pipelines')\n # TODO make a name based on a hash of the text to avoid race conditions\n pipe_fpath = join(cache_dir, 'temp_pipeline_file.pipe')\n self.write(pipe_fpath)\n run_pipe_file(pipe_fpath, dry=dry)", "def run(self, dry=False):\n ACT = self.act\n LOG = ACT.log\n\n LOG.log(\"Comparing real and randomized datasets to determine optimal MI\")\n generate_mi_histograms(ACT)\n\n self.sum = 0\n self.sumfpr = 0\n n = 0\n realmihist = ACT.real.mihist\n for d in ACT.datasets:\n if not d.real:\n outfile = \"{}.vs.{}.mi.hist.csv\".format(ACT.real.name, d.name)\n shmihist = d.mihist\n mi = compare_mi_histograms(outfile, realmihist, shmihist)\n LOG.log(\"Comparing {} and {}: maxDiff={} for mi={}, FPR={}\", realmihist, shmihist, mi[0], mi[1], mi[2])\n self.sum += mi[1]\n self.sumfpr += mi[2]\n n += 1\n\n self.optmi = self.sum / n\n self.fprmi = self.sumfpr / n\n\n LOG.log(\"MI threshold: {} (fpr={})\", self.optmi, self.fprmi)\n\n ## Now filter the adj files with this threshold\n nfilt = 0\n for d in ACT.datasets:\n LOG.log(\"Filtering dataset {} with MI={}\", d.name, self.optmi)\n script = d.write_filter_script(mi=self.optmi, log=LOG)\n if not dry:\n os.chdir(d.dirname)\n ACT.submit(script, done=\"../[email protected]\")\n os.chdir(\"..\")\n nfilt += 1\n ACT.wait((\"[email protected]\", nfilt))" ]
[ "0.6989833", "0.65439415", "0.6299042", "0.62920314", "0.6024881", "0.59835994", "0.59672785", "0.59232223", "0.58818495", "0.58818495", "0.58150184", "0.5740509", "0.55362505", "0.5323272", "0.52800596", "0.51863605", "0.51516193", "0.51297456", "0.50773555", "0.503785", "0.50312775", "0.5027144", "0.49888617", "0.49619427", "0.49028537", "0.48901057", "0.48876026", "0.48875198", "0.4878137", "0.48546436" ]
0.7023573
0
Creates and returns a dictionary of files in the source directory. `recursive` if True, search is also performed within subdirectories.
def get_dir_files(self, recursive=False): logging.info('Enumerating files under the source path (recursive=%s) ...', recursive) files = {} if not recursive: files[self.path_source] = [ f for f in os.listdir(self.path_source) if os.path.isfile(os.path.join(self.path_source, f)) ] else: for current_dir, sub_dirs, dir_files in os.walk(self.path_source): files[os.path.join(self.path_source, current_dir)] = [f for f in dir_files] return files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_file_dict():\n import os\n file_dict = {}\n for root, dirs, files in os.walk('.'):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n for f in files:\n try:\n with open(f, 'r') as thing:\n res = thing.readline()\n except:\n res = ''\n file_name = os.path.join(root, f).lstrip('./')\n file_dict[file_name] = res\n return file_dict", "def scan(root: str, config: dict) -> dict:\n\n extensions = config['extensions']\n exceptions = config['exceptions']\n\n # get list of directories, with their contents (files and sub-directories)\n dirs_by_path = {\n get_path_from_common_root(dir_path, root): (\n dir_path,\n dir_names,\n [name for name in file_names if is_valid_file_name(name, config)]\n )\n for dir_path, dir_names, file_names in os.walk(root)\n }\n\n dirs_by_path = {key: value for key, value in dirs_by_path.items() if value[1] or value[2]}\n\n # get list of file paths\n file_path_list = [\n (file_dir, file_name)\n for file_dir, _, file_names in dirs_by_path.values()\n for file_name in file_names\n ]\n\n files_by_path = {}\n # todo: put below loop contents into function, used in above list comprehension\n for file_dir, file_name_with_extension in file_path_list:\n # todo: remove code file definition from here to allow this to be reused for non-code related files\n # todo: refactor to use inheritable File class instead of named tuple, to be reused for non-code files\n\n if file_name_with_extension in exceptions:\n continue\n\n file = CodeFile(\n imports=[],\n exports=[],\n local_params=[],\n blocks=[],\n dir=file_dir,\n name=file_name_with_extension.rsplit('.', 1)[0],\n extension=file_name_with_extension.rsplit('.', 1)[1]\n )\n\n parse_file(file, config)\n file_no_dupes = remove_duplicate_entries(file)\n\n dir_key = get_path_from_common_root(file_dir, root)\n if dir_key not in files_by_path:\n files_by_path[dir_key] = []\n\n files_by_path[dir_key].append(file_no_dupes)\n\n return {'dirs_by_path': dirs_by_path, 'files_by_path': files_by_path,\n 'starting_point': get_path_from_common_root(root, root)}", "def get_files_dict(folder_path, filter_term, recursive):\n if recursive:\n query = folder_path + '**/' + filter_term\n files_list = glob.glob(query, recursive=True)\n else:\n query = folder_path + filter_term\n files_list = glob.glob(query, recursive=False)\n files_list = [f for f in files_list if os.path.isfile(f)]\n files_dict = {f: get_timestamp(f) for f in files_list}\n return files_dict", "def get_files(current_dir, filename_pattern=\".*\"):\n files_dict = {}\n for root, dirs, files in os.walk(current_dir):\n files_dict.update(\n {filename: os.path.join(root, filename) for filename in files if re.match(filename_pattern, filename)}\n )\n return files_dict", "def _recursivelyFindFiles(self, topLevelDirectory, extension=\".py\"):\n print ('finding ' + extension + '...\\n')\n tempFilesFound = []\n tempSubDirs = {} #initialize temporary dictionary of sbudirectories\n \n for dirpath, dirnames, filenames in os.walk(topLevelDirectory):\n #print 'dirpath= ' + dirpath\n for filename in filenames:\n #check file extension and verify this is not a hidden file\n #also need to verify that the entity is a file (this avoids problems when directory names have file extensions)\n if filename[-len(extension):] == extension and filename[0] != '.' 
and os.path.isfile(dirpath+\"/\"+filename):\n #print 'filename = ' + dirpath +'/'+filename\n if dirpath == topLevelDirectory:\n tempFilesFound.append(dirpath+\"/\"+filename)\n else:\n #print '********* '\n #print dirpath\n tempSubDirs[dirpath] = True\n\n ##recursively search sub-directories\n #for dirname in dirnames:\n ##ignore directories with names that begin with a '.' or '_'\n #if dirname[0] != '.' and dirname[0] != '_':\n #self._findFiles(dirname, extension)\n \n #self.SubDirsFound=self.subdirs.keys()\n\n #in Python 3 dict.keys(), dict.values() and dict.items() will all return iterable views instead of lists \n if sys.version_info >= (3, 0):\n return (tempFilesFound, list(tempSubDirs.keys()))\n \n return (tempFilesFound, tempSubDirs.keys())", "def _get_recursive_files_and_stats(path: str) -> Dict[str, Tuple[float, int]]:\n files_stats = {}\n for root, dirs, files in os.walk(path, topdown=False):\n rel_root = os.path.relpath(root, path)\n for file in files:\n try:\n key = os.path.join(rel_root, file)\n stat = os.lstat(os.path.join(path, key))\n files_stats[key] = stat.st_mtime, stat.st_size\n except FileNotFoundError:\n # Race condition: If a file is deleted while executing this\n # method, just continue and don't include the file in the stats\n pass\n\n return files_stats", "def findFilesToCompress(sourcePath, includeMatch=None):\n toZip = {}\n if os.path.isdir(sourcePath):\n # Remove any trailing slash\n searchDir = os.path.normpath(sourcePath)\n for path in cake.filesys.walkTree(searchDir, includeMatch=includeMatch):\n toZip[os.path.normcase(path)] = path\n else:\n toZip[os.path.normcase(path)] = path\n \n return toZip", "def list_files_in_directory(self):\n lesson_file_dict = dict()\n lesson_file_dict[\"files\"] = []\n\n directory_list = listdir(self.sub_dir)\n for directory in directory_list:\n if isfile(join(self.sub_dir, directory)):\n lesson_file_dict[\"files\"].append(directory)\n\n return lesson_file_dict", "def repo_fs():\n for root, dirs, files in os.walk(\".\"):\n dirs[:] = [ # add any extra dirs to ignore #\n d for d in dirs\n if '.' 
not in d\n and 'ENV' not in d\n and '__' not in d\n and 'build' not in d\n ]\n\n for f in files:\n if f.endswith(\".py\"):\n if not f.startswith('__'):\n ALL_PY_FILES.append(os.path.join(root, f))\n PY_FILES.append(os.path.join(root, f))\n if f.endswith(\".yml\"):\n YML_FILES.append(os.path.join(root, f))\n if f.startswith(\"requirements\"):\n PIP_FILES.append(os.path.join(root, f))\n if f.startswith(\"development\"):\n DEV_FILES.append(os.path.join(root, f))\n if f.startswith(\"README.md\"):\n README_FILES.append(os.path.join(root, f))\n if f.startswith(\"LICENSE\"):\n LICENSE.append(os.path.join(root, f))\n if f.startswith(\"CONTRIBUTIONS\"):\n CONTRIBUTIONS.append(os.path.join(root, f))\n\n if PY_FILES:\n parse_files()\n\n return { # dictionary with all lists of file path/names #\n 'PY_FILES': PY_FILES,\n 'YML_FILES': YML_FILES,\n 'PIP_FILES': PIP_FILES,\n 'README_FILES': README_FILES,\n 'TEST_FILES': TEST_FILES,\n 'LICENSE': LICENSE,\n 'URL_FILES': URL_FILES,\n 'CONTRIBUTIONS': CONTRIBUTIONS,\n 'SETUP_FILES': SETUP_FILES,\n 'MODEL_FILES': MODEL_FILES,\n 'SETTINGS_FILES': SETTINGS_FILES,\n 'DEV_FILES': DEV_FILES,\n }", "def _recurse(self, path):\n files = {}\n empty_dirs = []\n try:\n sub_paths = os.listdir(path)\n except OSError as exc:\n if exc.errno == errno.ENOENT:\n # Path does not exist\n sys.stderr.write(\"{} does not exist\\n\".format(path))\n sys.exit(42)\n elif exc.errno in (errno.EINVAL, errno.ENOTDIR):\n # Path is a file (EINVAL on Windows, ENOTDIR otherwise)\n files[path] = self._mode(path)\n else:\n if not sub_paths:\n empty_dirs.append(path)\n for fn_ in sub_paths:\n files_, empty_dirs_ = self._recurse(os.path.join(path, fn_))\n files.update(files_)\n empty_dirs.extend(empty_dirs_)\n\n return files, empty_dirs", "def recursive_rastersstats_to_dict(path, fn_regex=r'*2018.tif'):\n\n # Initialize empty dictionary\n rstr_dict = {}\n # Get rasters that in dir and all subdirs that match pattern\n for f in glob.iglob(os.path.join(path, '**', fn_regex), recursive=True):\n rstr_dict[f] = {}\n\n src = rasterio.open(f)\n arr = src.read(1, masked=True).filled(np.nan)\n arr[arr <= -9999] = np.nan\n rstr_dict[f]['arr'] = arr\n rstr_dict[f]['mu'] = np.nanmean(rstr_dict[f]['arr'])\n rstr_dict[f]['sigma'] = np.nanstd(rstr_dict[f]['arr'])\n rstr_dict[f]['CV'] = np.divide(rstr_dict[f]['sigma'], rstr_dict[f]['mu'])\n rstr_dict[f]['profile'] = src.profile\n rstr_dict[f]['year'] = re.findall('(\\d{4})', f)\n\n return rstr_dict", "def find_data_files(source, target, patterns):\r\n if glob.has_magic(source) or glob.has_magic(target):\r\n raise ValueError(\"Magic not allowed in src, target\")\r\n ret = {}\r\n for pattern in patterns:\r\n pattern = os.path.join(source, pattern)\r\n for filename in glob.glob(pattern):\r\n if os.path.isfile(filename):\r\n targetpath = os.path.join(target, os.path.relpath(filename,source))\r\n path = os.path.dirname(targetpath)\r\n ret.setdefault(path, []).append(filename)\r\n return sorted(ret.items())", "def _recursivelyFindFile(self, topLevelDirectory, filename):\n print ('finding ' + filename + '...\\n')\n tempSubDirs = {} #initialize temporary dictionary of sbudirectories\n \n for dirpath, dirnames, filenames in os.walk(topLevelDirectory):\n #print '---dirpath---'\n #print dirpath\n #print '---dirnames---'\n #print dirnames\n #print '---filenames---'\n #print filenames\n #print '------'\n for f in filenames:\n #check filenames for a match\n if f == filename:\n tempSubDirs[dirpath] = True\n\n #in Python 3 dict.keys(), dict.values() and dict.items() will all 
return iterable views instead of lists \n if sys.version_info >= (3, 0):\n return list(tempSubDirs.keys())\n \n return tempSubDirs.keys()", "def read_from_folder(folder):\n\n raw_dict = dict()\n\n for file in os.listdir(folder):\n raw = read_raw_from_file(os.path.join(folder, file))\n raw_dict[file] = raw\n\n return raw_dict", "def _load_files(self):\n files = {}\n for fn_ in self.opts[\"src\"]:\n if os.path.isfile(fn_):\n files.update(self._file_dict(fn_))\n elif os.path.isdir(fn_):\n salt.utils.stringutils.print_cli(\n \"{} is a directory, only files are supported \"\n 'in non-chunked mode. Use \"--chunked\" command '\n \"line argument.\".format(fn_)\n )\n sys.exit(1)\n return files", "def scan(self,project_dir):\n ftypes = [\".csv\", \".data\", \".xlsx\"]\n print(\"Scanning directory : \",project_dir)\n print(\"Searching for : \",ftypes)\n self.localfiles = {}\n for dirpath, dirnames, filenames in os.walk(project_dir, topdown=True):\n for filename in filenames:\n for ftype in ftypes:\n if ftype in filename:\n self.localfiles[filename] = {\n \"filename\": filename,\n \"filesize\": getsize(os.path.join(dirpath, filename)),\n \"abspath\": os.path.join(dirpath, filename),\n \"dirpath\": dirpath,\n \n }\n print(\"Found These: \",[file_name for file_name in self.localfiles.keys()])", "def load_files(directory):\n onlyfiles = [f for f in listdir(directory) if isfile(join(directory, f))]\n\n dicArquivos = dict()\n for file in onlyfiles:\n f = open(join(directory,file),\"r\", encoding='utf8')\n conteudo = f.read()\n dicArquivos[file] = conteudo\n f.close()\n\n return dicArquivos", "def load_files(directory):\n import os\n import re\n\n files = dict()\n\n for file in os.scandir(directory):\n if re.search(\".txt$\", file.name):\n with open(file.path, \"r\", encoding=\"utf8\") as f:\n # re.sub(\".txt$\", \"\", file.name)\n files[file.name] = f.read()\n\n return files", "def get_fs_dict (\n initial_root, create_item=None, dict_cls=dict,\n dirname_filter=None, filename_filter=None,\n include_root=False, toplevel_files=True, prune_empty=False, file_key=None,\n):\n # TODO(could-do): max_depth=N\n fsdict = dict_cls()\n get_file_key = ( lambda x: x ) if file_key is None else file_key\n\n\n for root, dict_relpath, dirnames, filenames in walk_relpath (\n initial_root, include_root=include_root, prune_empty=prune_empty,\n dirname_filter=dirname_filter, filename_filter=filename_filter\n ):\n if dict_relpath:\n dictpath = dict_relpath.split ( os.sep )\n parent = functools.reduce ( dict_cls.get, dictpath[:-1], fsdict )\n\n if create_item is None:\n parent [dictpath[-1]] = dict_cls.fromkeys (\n map ( get_file_key, filenames )\n )\n else:\n parent [dictpath[-1]] = dict_cls (\n (\n get_file_key ( fname ),\n create_item ( ( root + os.sep + fname ), fname, root )\n )\n for fname in filenames\n )\n\n elif not toplevel_files:\n pass\n\n elif create_item is None:\n for fname in filenames:\n fsdict [get_file_key(fname)] = None\n\n else:\n for fname in filenames:\n fsdict [get_file_key(fname)] = create_item (\n ( root + os.sep + fname ), fname, root\n )\n # -- end for\n\n return fsdict", "def walk_recursive(root, pattern='*.py'):\r\n for root, dirnames, filenames in os.walk(root):\r\n for filename in fnmatch.filter(filenames, pattern):\r\n yield os.path.join(root, filename)", "def make_all_files_dictionary(self, all_files, append_to_this=False):\n if append_to_this:\n rdict = append_to_this\n else:\n rdict = {}\n\n all_files.sort()\n for i in all_files:\n count = len(rdict) + 1\n i = 
os.path.abspath(os.path.expanduser(i))\n\n if platform.system() == \"Windows\":\n full_filename = i.split('\\\\')\n else:\n full_filename = i.split('/')\n\n full_filename = full_filename[-1]\n\n extension = full_filename.split('.')\n extension = extension[-1]\n extension = extension.upper()\n\n filename = full_filename.split('.')\n filename.pop(-1)\n filename = '.'.join(filename)\n\n rdict[i] = dict(\n path=i,\n processed=False,\n drawn=False,\n count=count,\n filename=filename,\n extension=extension,\n status='UNPROCESSED',\n )\n\n return rdict", "def load_files(directory):\n fileDict = {}\n files = os.listdir(directory)\n for f in files:\n fpath = os.path.join(directory, f)\n with open(fpath) as myFile:\n text = myFile.read()\n fileDict[f] = text\n return fileDict", "def gen_recursive_filelist(d):\n \n for root, directories, files in os.walk(d):\n for file in files:\n yield os.path.join(root, file)", "def analyze_files(self):\n num_file = 0\n results = dict()\n try:\n list_files = os.listdir(self.directory)\n except FileNotFoundError:\n raise FileNotFoundError(\"Can't find any file\")\n else:\n for file in list_files: #looping the files in the directly\n num_file += 1\n if file.endswith(\".py\"): # Looking for files that end with .py\n try:\n fp = open(os.path.join(self.directory, file), \"r\")\n except FileNotFoundError:\n raise FileNotFoundError(f\"Can't open file no {num_file}\")\n else:\n with fp:\n c_total = 0 #Total length of Characters for the entire file\n filename = file # Storing the file name\n t_line = 0 # Getting the total number of line\n t_def = 0 #Getting the total number of functions\n t_class = 0 #Getting the total number of classes\n \n for line in fp:\n t_line += 1 # Counting each line\n t_char = len(line) #Length of characters for each line\n n_line = line.strip() # gets rid of white spaces and new lines\n c_total += t_char # adding each total char in line to the pervious total char in line\n if n_line.startswith(\"def \"): \n t_def += 1 \n elif n_line.startswith(\"class \"):\n t_class += 1\n results[filename] = {'class': t_class, 'function': t_def, 'line': t_line, 'char': c_total }\n return results", "def extract_folder_file_structure() -> Dict[str, List[str]]:\n folders_and_files = {}\n for path_to_folder in glob.glob(f\"{ZULIPTERMINAL}/**/\", recursive=True):\n complete_directory_path = Path(path_to_folder)\n if complete_directory_path.name in FOLDERS_TO_EXCLUDE:\n continue\n relative_directory_path = complete_directory_path.relative_to(ROOT_DIRECTORY)\n if str(relative_directory_path) not in DESC_FOR_NO_FILE_FOLDERS:\n files_in_directory = [\n file.name\n for file in complete_directory_path.glob(\"*.py\")\n if file.name != \"__init__.py\"\n ]\n folders_and_files[str(relative_directory_path)] = files_in_directory\n return folders_and_files", "def read_project(path: str):\n textfilecontent = {}\n\n # Discover .txt files and add them to the dictionary\n for filepath in iglob(os.path.join(path, '**/*.txt'), recursive=True):\n add_path_dict(input_dict=textfilecontent, start_path=path,\n file_path=filepath)\n\n return textfilecontent", "def _walk_dirs(self):\n for project_name in self.new_source_paths.keys():\n # print \"-------- Now mapping ---- \" + project_name\n search_path = self.root + project_name + '\\\\Data'\n for dirpath, subdirs, files in os.walk(search_path):\n for file in files:\n self.new_source_paths[project_name][file] = dirpath\n # print \"------------ Finished mapping ------- \" + project_name\n return self.new_source_paths", "def 
get_changed_files_and_content(path_to_repository, file_encoding, ignore_subrepositories):\n changed_files = filter_changed_files(get_changed_files(path_to_repository, ignore_subrepositories),\n path_to_repository, file_encoding)\n return {filename: open(os.path.join(path_to_repository, filename), encoding=file_encoding).read() for filename in\n changed_files}", "def process_folder(root, path=\"\"):\n myDict = {}\n if path:\n if root.cd(path):\n for key in ROOT.gDirectory.GetListOfKeys():\n filterKey(root, key, path, myDict, \"__List\")\n else:\n for key in ROOT.gDirectory.GetListOfKeys():\n mypath = ROOT.gDirectory.GetPathStatic()\n filterKey(root, key, mypath, myDict, \"\")\n ROOT.gDirectory.cd(mypath)\n return myDict", "def default_file_hierarchy_dict():\n return {\n directory(\"include\"): {\n directory(\"with spaces\"): {\n file(\"with spaces.hpp\"): {\n namespace(\"with_spaces\"): {\n function(\"int\", \"value\"): parameters()\n }\n }\n }\n }\n }" ]
[ "0.66049916", "0.6600909", "0.63020784", "0.58902156", "0.58048713", "0.5802294", "0.5765301", "0.57375884", "0.5660179", "0.5609506", "0.5603751", "0.5581873", "0.55007124", "0.5482791", "0.5479842", "0.5460904", "0.54340225", "0.53852797", "0.53830934", "0.5363218", "0.5351172", "0.5274727", "0.5267546", "0.5222523", "0.5222153", "0.522103", "0.5216388", "0.5212971", "0.52128524", "0.518316" ]
0.7256247
0
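The last negative above derives the file name and extension by splitting the path on a platform-specific separator by hand; the same result can be had portably with the standard library, as in the short sketch below (the function name is illustrative, not taken from any of the records):

    import os

    def split_name_and_extension(path):
        # os.path.basename/splitext understand the running platform's separator,
        # so no explicit platform.system() branch is needed for local paths.
        full_filename = os.path.basename(path)
        filename, extension = os.path.splitext(full_filename)
        return filename, extension.lstrip('.').upper()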
Takes a file dictionary created with `get_dir_files` and returns a dictionary of the same kind containing only audio files of supported types.
def filter_target_extensions(self, files_dict):
    files_filtered = defaultdict(list)
    supported_formats = self.sox_get_supported_formats()
    logging.info('Filtering audio files ...')
    paths = list(files_dict.keys())
    for path in paths:
        if not path.endswith('letmehear'):
            files = sorted(files_dict[path])
            for f in files:
                if os.path.splitext(f)[1].lstrip('.').lower() in supported_formats:
                    files_filtered[path].append(f)
    return files_filtered
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_all_music(directory, accept=(\".wav\", \".mp3\", \".ogg\", \".mdi\")):\n songs = {}\n for song in os.listdir(directory):\n name, ext = os.path.splitext(song)\n if ext.lower() in accept:\n songs[name] = os.path.join(directory, song)\n return songs", "def load_all_music(directory, accept=(\".wav\", \".mp3\", \".ogg\", \".mdi\")):\n songs = {}\n for song in os.listdir(directory):\n name,ext = os.path.splitext(song)\n if ext.lower() in accept:\n songs[name] = os.path.join(directory, song)\n return songs", "def load_all_sfx(directory, accept=(\".wav\", \".mp3\", \".ogg\", \".mdi\")):\n effects = {}\n for fx in os.listdir(directory):\n name, ext = os.path.splitext(fx)\n if ext.lower() in accept:\n effects[name] = pg.mixer.Sound(os.path.join(directory, fx))\n return effects", "def __load_all_sounds(sounds_dict, directory, accept=('.ogg')):\r\n for sound in os.listdir(directory):\r\n name, ext = os.path.splitext(sound)\r\n if ext.lower() in accept:\r\n sounds_dict[name] = pygame.mixer.Sound(os.path.join(directory, sound))", "def map_audio(self): \n for root, dirs, files in os.walk(self.dir):\n for name in files:\n if (name.split(\".\")[-1].lower() == 'm4a' or \\\n name.split(\".\")[-1].lower() == 'mp3'):\n \n cur_path = \"{0}/{1}\".format(root, name)\n cur_file = auto.File(cur_path)\n \n artist = cur_file.artist.lower().strip()\n album = cur_file.album.lower().strip()\n title = cur_file.title.lower().strip()\n bitrate = cur_file.bitrate\n \n if not artist in self.audio_dict:\n self.audio_dict[artist] = {}\n \n if not album in self.audio_dict[artist]:\n self.audio_dict[artist][album] = {}\n \n title_key = title\n for in_album_title in self.audio_dict[artist][album]:\n if sm(None, title, in_album_title).ratio() > 0.9:\n title_key = in_album_title\n \n if not title_key in \\\n self.audio_dict[artist][album]:\n self.audio_dict[artist][album][title_key] = []\n \n self.audio_dict[artist][album][title_key].append({\n 'path': cur_path,\n 'bitrate': bitrate,\n 'file_name': name\n })\n \n return self", "def getAudioFileFromFilelist(audiofiltered):\n for audioFile in audiofiltered:\n audioRoot, audioExt = os.path.splitext(audioFile)\n if audioExt in ['.wav', '.aiff', '.aif']:\n return audioFile", "def get_wavs_dict_list(test_dir):\n # Find all clean files and make an {id: filepath} dictionary\n clean_wavs = glob.glob(os.path.join(test_dir, \"clean/*.wav\"))\n clean_dic = make_wav_id_dict(clean_wavs)\n # Same for noisy files\n noisy_wavs = glob.glob(os.path.join(test_dir, \"noisy/*.wav\"))\n noisy_dic = make_wav_id_dict(noisy_wavs)\n assert clean_dic.keys() == noisy_dic.keys()\n # Combine both dictionaries\n dict_list = [dict(clean=clean_dic[k], noisy=noisy_dic[k], id=k) for k in clean_dic.keys()]\n return dict_list", "def getAudioFiles(directory):\n\n # Fetch list of files in selected directory\n fileList = os.listdir(directory)\n fileList.sort()\n\n # Create Audio objects\n audioList = []\n for f in fileList:\n if f.endswith('.wav'):\n audioList.append(Audio(directory, f))\n\n return audioList", "def filterAudioFilesFromFilelist(filelist):\n audioFileList = []\n for audioFilter in filelist:\n audioRoot, audioExt = os.path.splitext(audioFilter)\n if audioExt in ['.wav', '.aiff', '.aif']:\n audioFileList.append(audioFilter)\n # end for loop\n return audioFileList", "def getSupportedFileFormats():\n return {\"Bitmap\":[\"*.bmp\", \"*.dib\"], \"JPEG\": [\"*.jpeg\", \"*.jpg\", \"*.jpe\"], \"JPEG 2000\": [\"*.jp2\"],\"Portable Network Graphics\" : [\"*.png\"], \"WebP\": [\"*.webp\"], \"Portable Image 
Formats\":[\"*.pbm\", \"*.pgm\", \"*.ppm\"], \"Sun Rasters\":[\"*.sr\", \"*.ras\"], \"TIFF Files\": [\"*.tiff\",\"*.tif\"] }", "def list_files_to_convert():\n for root, dirs, files in os.walk(video_dir):\n file_list = [name for name in files if not name.endswith('.mp3')]\n for name in file_list:\n filepath = os.path.join(root, name)\n media_info = MediaInfo.parse(filepath, library_file=dll_path)\n for track in media_info.tracks:\n if 'Audio' in track.track_type:\n # print(track.track_type, track.bit_rate)\n # print(filepath, \"Is an Audio/Video file, and should be converted because a sound track is found\")\n yield dict(path=filepath, info=media_info)", "def _load_audio_list(self, path):\n\n result = {}\n\n for entry in textfile.read_separated_lines_generator(path, separator='\\t', max_columns=4):\n for index, _ in enumerate(entry):\n if entry[index] == '\\\\N':\n entry[index] = None\n\n if len(entry) < 4:\n entry.extend([None] * (4 - len(entry)))\n\n if not self.include_empty_licence and entry[2] is None:\n continue\n\n if self.include_licenses is not None and entry[2] not in self.include_licenses:\n continue\n\n result[entry[0]] = entry[1:]\n\n return result", "def _get_extension_to_type_map(file_types):\n extension_to_type = dict()\n for file_type in file_types:\n for file_ext in file_type['extensions']:\n if file_ext not in extension_to_type:\n extension_to_type[file_ext] = file_type\n return extension_to_type", "def filter_fontfiles(self, filenames, d=dict()):\n for f in filenames:\n n, ext = os.path.splitext(f)\n # skip for the files that are not supported\n if not ext in SUPPORTED_FONTS: continue\n\n d[n] = d[n] + [ext] if d.get(n) else [ext]\n return d", "def _get_mime_to_type_map(file_types):\n mime_to_type = dict()\n for file_type in file_types:\n if 'mime' in file_type and file_type['mime'] not in mime_to_type:\n mime_to_type[file_type['mime']] = file_type\n return mime_to_type", "def get_music_files(pth: pathlib.Path) -> typing.List[mutagen.FileType]:\n file_names = [os.path.join(pth, f) for f in os.listdir(pth)]\n files = [mutagen.File(f) for f in file_names if os.path.isfile(f)]\n return [f for f in files if f is not None]", "async def audiofiles(self, ctx):\r\n files = '\"{0}\"'.format('\", \"'.join(self.audio_files))\r\n await ctx.send(\"```Available audio files :\\n{0}```\".format(files))", "def _init_wave_files(self, files, directory):\n\n # 2048 triggers bug in https://github.com/adafruit/circuitpython/issues/3030\n self._file_buf = bytearray(512) # DO NOT CHANGE size til #3030 is fixed\n\n missing = []\n fhs = {}\n for file in files:\n wav_file = None\n filename = directory + \"/\" + file + \".wav\"\n try:\n wav_file = open(filename, \"rb\")\n fhs[file] = WaveFile(wav_file, self._file_buf)\n except OSError:\n # OSError: [Errno 2] No such file/directory: 'filename.ext'\n missing.append(filename)\n\n # Raises an exception at the end to allow it to report ALL\n # of the missing files in one go to help out the user\n if missing:\n raise SampleJukeboxError(missing)\n self._wave_files = fhs", "def parse(cls, file: Keyvalues) -> Dict[str, 'Sound']:\n sounds = {}\n for snd_prop in file:\n volume = split_float(\n snd_prop, 'volume',\n VOLUME.__getitem__,\n 1.0,\n )\n pitch = split_float(\n snd_prop, 'pitch',\n Pitch.__getitem__,\n 100.0,\n )\n\n if 'soundlevel' in snd_prop:\n level = split_float(\n snd_prop, 'soundlevel',\n Level.__getitem__,\n Level.SNDLVL_NORM,\n )\n elif 'attenuation' in snd_prop:\n atten_min, atten_max = split_float(\n snd_prop, 'attenuation',\n 
ATTENUATION.__getitem__,\n ATTENUATION['ATTN_IDLE'],\n )\n # Convert to a soundlevel.\n # See source_sdk/public/soundflags.h:ATTN_TO_SNDLVL()\n level = (\n (50.0 + 20.0 / atten_min) if atten_min else 0.0,\n (50.0 + 20.0 / atten_max) if atten_max else 0.0,\n )\n else:\n level = (Level.SNDLVL_NORM, Level.SNDLVL_NORM)\n\n # Either 1 \"wave\", or multiple in \"rndwave\".\n wavs: List[str] = []\n for prop in snd_prop:\n if prop.name == 'wave':\n wavs.append(prop.value)\n elif prop.name == 'rndwave':\n for subprop in prop:\n wavs.append(subprop.value)\n\n channel_str = snd_prop['channel', 'CHAN_AUTO'].upper()\n channel: Union[int, Channel]\n if channel_str.startswith('CHAN_'):\n channel = Channel(channel_str)\n else:\n channel = int(channel_str)\n\n sound_version = snd_prop.int('soundentry_version', 1)\n\n if 'operator_stacks' in snd_prop:\n if sound_version == 1:\n raise ValueError(\n 'Operator stacks used with version '\n f'less than 2 in \"{snd_prop.real_name}\"!'\n )\n\n start_stack, update_stack, stop_stack = (\n Keyvalues(stack_name, [\n prop.copy()\n for prop in\n snd_prop.find_children('operator_stacks', stack_name)\n ])\n for stack_name in\n ['start_stack', 'update_stack', 'stop_stack']\n )\n else:\n start_stack, update_stack, stop_stack = [None, None, None]\n\n sounds[snd_prop.name] = Sound(\n snd_prop.real_name,\n wavs,\n volume,\n channel,\n level,\n pitch,\n start_stack,\n update_stack,\n stop_stack,\n sound_version == 2,\n )\n return sounds", "def audio_media_type(name):\n return name.endswith(('.ogg', '.oga', '.m4a'))", "def load_music_files():\n # Make a list of music files, right now it is done by collection all files\n # below the current folder whose extension starts with mp3/wav \n print('Loading music files...')\n for path, dirs, files in os.walk('.'):\n for file_ in files:\n file_path = os.path.relpath(os.path.join(path, file_))\n url_path = os.path.join(*[quote(part) for part in os.path.split(file_path)]) \n ext = os.path.splitext(file_)[1].lower()\n name = os.path.splitext(file_)[0].lower()\n key = ''.join(name.split()) # unique key - no spaces\n audio_file = None\n if ext.startswith('.mp3'):\n audio = MP3(file_path) \n audio_file = AudioFile(url_path, audio.info.length, name, key) \n if audio_file:\n music_files.append(audio_file)\n print('Found:', music_files[-1])", "def ComputeFileTypes(self):\n for rel_path, file_data in self._files.iteritems():\n if 'ftype' in file_data:\n continue\n ftype = self._file_type_decoder.GetType(rel_path)\n if ftype:\n file_data['ftype'] = ftype", "def get_allowed_file_types(self):\n return self.allowed_file_types", "def filter(self):\n for f in FileHelper.ALL_PATHS:\n media_obj = MediaObject(FileHelper.get_url(f), FileHelper.get_title(f), FileHelper.get_media_type(f), FileHelper.get_icon(f), FileHelper.get_duration(f), FileHelper.get_ctype(f))\n _id = media_obj.uuid\n if media_obj.media_type == \"image\":\n DB.IMAGES[_id] = media_obj\n elif media_obj.media_type == \"audio\":\n DB.MUSIC[_id] = media_obj\n elif media_obj.media_type == \"video\":\n DB.VIDEOS[_id] = media_obj\n else:\n print \"File '%s' doesn't play nice.\" % (f)", "def _get_types(self):\n types = {'word': [constants.PAD, constants.UNK],\n 'char': [constants.PAD, constants.UNK],\n 'tag': [constants.PAD],\n }\n\n for _, filepath in self.directory.items():\n if filepath is not None:\n conll_file = os.path.basename(filepath) # get name of conll file\n types['word'].extend(set(self.conll_parser.words(conll_file)))\n types['char'].extend(set(chain(*[list(w) for w in 
self.conll_parser.words(conll_file)])))\n types['tag'].extend(set([tag[-1] for tag in self.conll_parser.tagged_words(conll_file)]))\n\n # ensure that we have only unique types\n types['word'] = list(set(types['word']))\n types['char'] = list(set(types['char']))\n types['tag'] = list(set(types['tag']))\n\n return types", "def audio_content_filter(item):\n return item.media_content_type.startswith(\"audio/\")", "def fileTypesCallback(self):\n if self.__e5project.getProjectType() == \"Django\":\n fileTypes = {\n \"*.html\": \"FORMS\",\n \"*.htm\": \"FORMS\",\n \"*.js\": \"SOURCES\",\n \"*.pot\": \"TRANSLATIONS\",\n \"*.po\": \"TRANSLATIONS\",\n \"*.mo\": \"TRANSLATIONS\",\n \"*.py\": \"SOURCES\",\n }\n else:\n fileTypes = {}\n return fileTypes", "def process_audio_multiprocess(file_paths_arr,\n filt_type, filt_cutoff_freq, filt_order,\n trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength,\n SAMPLE_RATE=48000, MIN_SAMPLE_RATE=15999, BIT_DEPTH=2,\n ignore_dirs=[\"Noise samples\",\"_Noisy_\",\"_Very Noisy_\"], skip_existing=False,\n in_ext_=None, out_ext=\".wav\", use_tqdm=True, dump_sample_rates=True\n ):\n import soundfile as sf\n import scipy\n from scipy import signal\n \n if dump_sample_rates:\n sample_rates = {} # array of dicts. e.g: [{path 0: sample_rate 0}, {path 1: sample_rate 1}, {path 2: sample_rate 2}, ...]\n \n skip = 0\n prev_sr = 0\n iterator = tqdm(file_paths_arr, smoothing=0.0) if use_tqdm else file_paths_arr\n for file_path in iterator: # recursive directory search\n in_ext = in_ext_ if (in_ext_ is not None) else os.path.splitext(os.path.split(file_path)[-1])[-1] # get ext from file_path or use override.\n out_path = file_path.replace(in_ext,out_ext)\n if skip_existing and os.path.exists(out_path):\n continue\n if any([filter_dir in file_path for filter_dir in ignore_dirs]):\n continue\n \n # VCTK cleanup\n #if file_path.endswith(f\"_mic1{in_ext}\"):\n # os.rename(file_path, file_path.replace(f\"_mic1{in_ext}\",in_ext))\n #if file_path.endswith(f\"_mic2{in_ext}\"):\n # continue\n try:\n native_sound, native_SR = sf.read(file_path, always_2d=True)\n except RuntimeError as ex:\n print(f'\"{os.path.split(file_path)[-1]}\" failed to load and has been deleted.\\nDELETED PATH: \"{file_path}\"')\n os.unlink(file_path)\n #raise RuntimeError(ex)\n native_sound = native_sound[:,0]# take first channel (either mono or left audio channel)\n native_sound = np.asfortranarray(native_sound).astype('float64') # and ensure the audio is contiguous\n \n if native_SR < MIN_SAMPLE_RATE: # skip any files with native_SR below the minimum\n continue\n if native_SR != SAMPLE_RATE: # ensure all audio is same Sample Rate\n try:\n sound = librosa.core.resample(native_sound, native_SR, SAMPLE_RATE)\n except ValueError as ex:\n print(ex, file_path, native_SR, len(native_sound), sep=\"\\n\")\n raise ValueError(ex)\n else:\n sound = native_sound\n \n if dump_sample_rates:\n sample_rates[os.path.abspath(out_path)] = native_SR\n \n # 24 bit -> 16 bit, 32 bit -> 16 bit\n if max(np.amax(native_sound), -np.amin(native_sound)) > (2**23): # if samples exceed values possible at 24 bit\n sound = (sound / 2**(31-15))#.astype('int16') # change bit depth from 32 bit to 16 bit\n elif max(np.amax(native_sound), -np.amin(native_sound)) > (2**15): # if samples exceed values possible at 16 bit\n sound = (sound / 2**(23-15))#.astype('int16') # change bit depth from 24 bit to 16 bit\n \n # apply audio filters\n for type_, freq_, order_ in zip(filt_type, 
filt_cutoff_freq, filt_order): # eg[ ['lp'], [40], [10] ] # i.e [type, freq, strength]\n sos = signal.butter(order_, freq_, type_, fs=SAMPLE_RATE, output='sos') # calcuate filter somethings\n sound = signal.sosfilt(sos, sound) # apply filter\n \n # apply audio trimming\n for i, (margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_, preemphasis_strength_) in enumerate(zip(trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength)):\n if preemphasis_strength_:\n sound_filt = librosa.effects.preemphasis(sound, coef=preemphasis_strength_)\n _, index = librosa.effects.trim(sound_filt, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n else:\n _, index = librosa.effects.trim(sound, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n try:\n sound = sound[int(max(index[0]-margin_left_, 0)):int(index[1]+margin_right_)]\n except TypeError:\n print(f'Slice Left:\\n{max(index[0]-margin_left_, 0)}\\nSlice Right:\\n{index[1]+margin_right_}')\n assert len(sound), f\"Audio trimmed to 0 length by pass {i+1}\\nconfig = {[margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_]}\\nFile_Path = '{file_path}'\"\n \n # write updated audio to file\n if os.path.exists(out_path):\n os.unlink(out_path) # using unlink incase the out_path object is a symlink\n sf.write(out_path, sound, SAMPLE_RATE)\n \n if dump_sample_rates:\n return sample_rates", "def test_audio_extension(filename):\n\t\n\t# We need to make a test for extention :\n\timport os\n\textension = os.path.splitext(filename)[1]\n\text_ok=['.aif','.aifc','.aiff','.au','.cda','.m4a','.m4b','.m4p','.mid','.midi','.mka','.mkv','.mod','.mp1','.mp2','.mp3','.mpa','.nst','.odm','.ogg','.ptm','.ra','.s3m','.snd','.wav','.wma','.wow','.xm','.AIF','.AIFC','.AIFF','.AU','.CDA','.M4A','.M4B','.M4P','.MID','.MIDI','.MKA','.MKV','.MOD','.MP1','.MP2','.MP3','.MPA','.NST','.ODM','.OGG','.PTM','.RA','.S3M','.SND','.WAV','.WMA','.WOW','.XM']\n\tif extension in ext_ok:\n\t\treturn True\n\telse:\n\t\treturn False", "def load_all_fonts(directory, accept=(\".ttf\",)):\n return load_all_music(directory, accept)" ]
[ "0.69851065", "0.69818187", "0.66238374", "0.6515361", "0.62522674", "0.6233915", "0.6141677", "0.6095092", "0.5976191", "0.59475046", "0.578013", "0.5737146", "0.5723099", "0.56094027", "0.56092876", "0.5605638", "0.5604673", "0.55708057", "0.5556047", "0.5521467", "0.5482845", "0.546647", "0.5445079", "0.5438249", "0.54204553", "0.54019856", "0.5392287", "0.53860587", "0.53828895", "0.5349221" ]
0.7316186
0
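The positive above keys the result by directory and keeps only files whose extension SoX reports as supported; a self-contained sketch of the same filter, decoupled from SoX, is shown below (the default format tuple is a placeholder assumption — in the original the list comes from `sox_get_supported_formats`):

    import os
    from collections import defaultdict

    def filter_by_extension(files_dict, supported_formats=('wav', 'mp3', 'ogg', 'flac')):
        # files_dict maps a directory path to the plain file names found in it.
        filtered = defaultdict(list)
        for path, files in files_dict.items():
            for f in sorted(files):
                ext = os.path.splitext(f)[1].lstrip('.').lower()
                if ext in supported_formats:
                    filtered[path].append(f)
        return filtered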
Used to set output file(s) length in seconds.
def set_part_length(self, seconds): self._part_length = seconds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outputFiles(self, filesizelist):\n self.outputs = filesizelist\n self.outputSize = reduce(lambda x,y: x + y[1], filesizelist, 0)", "def set_length(self, ak_tpl: BKT, newLength: float): # -> None:\n ...", "def setoutputsize(self, size, column=None):\n pass", "def _get_file_length(self, file):\n self[file] = file.stat().st_size", "def _create_sparsed_file(self, nms, path, size):\n nms.appliance.execute(\n 'truncate --size %(size)dG %(path)s' % {\n 'path': path,\n 'size': size\n }\n )", "def set_length(self, new_length):\n if(new_length == None):\n self._logger.write(\"Error! new_length cannot be a NoneType\")\n elif(type(new_length) != float):\n self._logger.write(\"Error! new_length must be of type float\")\n else:\n try:\n self._length = new_length\n except Exception as e:\n self._logger.write(\"Error! Could not set the new length:\\n %s\" % e)", "async def gpt2_set_length(self, ctx, *, arg=None):\n print('Command gpt2_set_length triggered')\n if arg:\n try:\n i = int(arg)\n assert (i > 0) and (i < 1024)\n except ValueError or AssertionError:\n ctx.send(\"ERROR: Argument must be a positive integer number\")\n self.update_config(length=arg)\n else:\n await ctx.send(\"ERROR: Argument required\")", "def set_length(self, length):\n if length < 0:\n raise AttributeError('length should be positive')\n self.progress_char_length = length", "def output_time(self, output_time):\n\n self._output_time = output_time", "def msize(path):\n with open(path, \"w\") as w:\n w.write(\"\")\n os.utime(path, (0, 0))\n time.sleep(0.4)\n with open(path, \"w\") as w:\n w.write(\"0\")\n os.utime(path, (0, 0))", "def duration_in_seconds(self, value):\n self.__duration = (value * 1000000)", "def setLength(self, new_length):\n\n self.length = new_length", "def change_tail_length(self, value):\n self.layer.tail_length = value", "def set_time_step_size(self, delta_t):\n self.delta_t = delta_t", "def set_duration(self, duration):\n self.__test_result[Result.__DURATION] = round(duration * 1000)", "def _update_cmd_time_info(self, end=False):\n time_stamp = time.time()\n time_passed = time_stamp - self._start_time\n if end:\n docs_proc_now = self._docs_processed % self._file_write_threshhold\n if docs_proc_now == 0:\n msg = ('Written {} documents to file in total. '\n 'Time passed: {:2f}')\n print(msg.format(self._docs_processed, time_passed))\n else:\n msg = ('Writing {} documents to file. '\n 'Written {} documents to file in total. '\n 'Time passed: {:2f}')\n print(msg.format(\n docs_proc_now, self._docs_processed, time_passed))\n else:\n msg = ('Writing {} documents to file. '\n 'Written {} documents to file in total. 
'\n 'Time passed: {:2f}')\n print(msg.format(self._file_write_threshhold,\n self._docs_processed, time_passed))", "def duration(self):\n pass", "def duration(self):\n pass", "def test_change_size(self, os_mock):\n os_mock.path.isfile.return_value = True\n os_mock.path.getsize.return_value = 42000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 42000)\n\n os_mock.path.getsize.return_value = 43000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 43000)\n\n self.data.status = Data.STATUS_DONE\n os_mock.path.getsize.return_value = 44000\n hydrate_size(self.data)\n self.assertEqual(self.data.output['test_file']['size'], 43000)", "def output_per_job_size_response_time(self):\r\n results_dirname = get_param('results_dir')\r\n num_tasks_to_response_times = {}\r\n for job in self.completed_jobs:\r\n if job.num_tasks not in num_tasks_to_response_times:\r\n num_tasks_to_response_times[job.num_tasks] = []\r\n num_tasks_to_response_times[job.num_tasks].append(\r\n job.response_time())\r\n \r\n n = get_param(\"num_tasks\")\r\n probes_ratio = get_param(\"probes_ratio\")\r\n for num_tasks, response_times in num_tasks_to_response_times.items():\r\n filename = os.path.join(\r\n results_dirname,\r\n \"%s_response_time_%s\" % (get_param(\"file_prefix\"),\r\n num_tasks))\r\n if get_param('first_time'):\r\n f = open(filename, 'w')\r\n f.write(\"n\\tProbesRatio\\tUtil.\\tMean\\tStdDev\\t99Pctl\\t\"\r\n \"NetworkDelay\\n\")\r\n f.close()\r\n f = open(filename, 'a')\r\n f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" %\r\n (n, probes_ratio, self.utilization,\r\n stats_mod.lmean(response_times), \r\n stats_mod.lstdev(response_times),\r\n stats_mod.lscoreatpercentile(response_times,.99),\r\n get_param(\"network_delay\")))\r\n f.close()", "def setMinOutputLength(self, value):\n return self._set(minOutputLength=value)", "def setMinOutputLength(self, value):\n return self._set(minOutputLength=value)", "def duration(self, duration):\n self._duration = duration", "def duration(self, duration):\n self._duration = duration", "def WriteFileSize(self):\n # Simply a calculation of the number of clusters (e.g. 
sectors) * 512\n total_size = 0\n for cluster_range in self.cluster_ranges:\n clusters = cluster_range.split(\"-\")\n difference = int(clusters[1]) - int(clusters[0]) + 1\n self.cluster_list.extend(self.CreateList(int(clusters[0]), int(clusters[1])))\n print(f\"Cluster difference between {clusters[1]} and {clusters[0]} is {difference}\")\n total_size += difference*512\n print(f\"Total size has been calculated as {total_size}\")\n with open(self.output_file, \"r+b\") as fh:\n seeker = (self.root_directory_offset*self.sector_size)+((self.index_number-1)*self.directory_index_size)+(self.file_size_offset)\n #s_array = bytearray()\n print(f\"Reversing {total_size}\")\n ba_size = (total_size).to_bytes(4, byteorder='little')\n print(f\"Preparing to write {ba_size} to {seeker}\")\n fh.seek(seeker)\n fh.write(ba_size)\n print(\"File size written to root directory\")\n return True", "def set_running_time(self, t):\n util.write_to_file(self.running_time_file, str(int(t)))", "def path_length(self,path,num_repeats=10):\n begin_time=datetime.datetime.now()\n #num_repeats=100\n for i in range(num_repeats):\n self.virtual_move_to(path)\n end_time=datetime.datetime.now()\n delta_t=end_time-begin_time\n path_length=delta_t.total_seconds()/float(num_repeats)\n if path_length ==0.0:\n print(\"Warning the path length is less than 1 microsecond, make sure num_repeats is high enough to measure it.\")\n return path_length", "def setTickLength(major=24,minor=16):\n dislin.ticlen(major,minor)", "def duration(file_path):\n command = [\"ffprobe\", \"-show_entries\", \"format=duration\", \"-i\", file_path]\n pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT)\n out, error = pipe.communicate()\n match_object = None if error else DURATION_REGEX.search(out.decode('utf-8'))\n if match_object is None:\n return 0\n length = float(match_object.group(1)) / 60\n return length", "def Resize(self):\n\n self.history_length = int( round( self.owner['time_span']/self.owner['sample_speed']))\n self.FreshStart()" ]
[ "0.5842382", "0.5635569", "0.56217164", "0.5617861", "0.55581576", "0.5520264", "0.5480444", "0.5435008", "0.54311806", "0.5418363", "0.5389413", "0.53848577", "0.53634846", "0.53174406", "0.5293262", "0.5286535", "0.52863", "0.52863", "0.52807194", "0.52784336", "0.5253117", "0.5253117", "0.52450377", "0.52450377", "0.52430785", "0.52311283", "0.52242696", "0.5214747", "0.52079105", "0.5199397" ]
0.6179856
0
Used to set a backshift, in seconds, for every part to be taken from the previous one.
def set_backshift(self, seconds): self._backshift = seconds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prev_shifted_time(self):\n return self._prev_sim_time + self.options.time.start_clocktime", "def shift(self, num):\n if not num:\n self._times, self._values = self._times, self._values\n elif num > 0:\n self._times, self._values = self._times[:-num], self._values[num:]\n else:\n self._times, self._values = self._times[-num:], self._values[:num]", "def cool(self):\n self.t = self.t - 1", "def _shifted_time(self):\n return self.sim_time + self.options.time.start_clocktime", "def advance_time_seconds(seconds):\r\n advance_time_delta(datetime.timedelta(0, seconds))", "def timeShiftPhotos(self,tdelta):\n self.imageDirectoryObj.__shift_datetimes__(tdelta)", "def move_back(t,n):\n lt(t)\n bk(t, n)\n rt(t)", "def test_shift_sets_new_tail_is_previous(new_dll):\n new_dll.shift()\n assert new_dll.tail.value == 4", "def ShiftTime(WName,lg_name):\n\tH_IN = mtd[WName]\n\tPC = H_IN.getRun()['proton_charge'].firstTime()\n\t#print \"P=\"+str(PC)+\"\\n\"\n\tP = H_IN.getRun()[lg_name].firstTime()\n\t#print \"P=\"+str(P)+\"\\n\"\n\tTdiff = PC-P\n\tTdiff_num = Tdiff.total_milliseconds()*1E-3\n\t#print \"Tdiff=\"+str(Tdiff_num)+\"\\n\"\n\tChangeLogTime(InputWorkspace=WName, OutputWorkspace = WName, LogName = lg_name, TimeOffset = Tdiff_num)", "def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()", "def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()", "def time_interval_sub(self, time_step, nsteps):\n world.subtime = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting subtime\")", "def _set_window_time(slices, times):\n t_idx_ = [t[-1] for t in slices]\n return times[t_idx_]", "def setNumTimeSubSteps(*argv):", "def flip_t_b(self, times: int):\n for i in range(0, times):\n self.tile_rows = self.tile_rows[::-1]", "def set_time_of_last_turn(time: int):\n store.time_of_last_turn = time", "def foward_shimmey(self):\n for x in range(6):\n self.right(primary=60, counter=30)\n time.sleep(.5)\n self.left(primary=70, counter=30)\n time.sleep(.5)\n self.back()\n time.sleep(2) \n self.stop()", "def _reset(self) -> ts.TimeStep:", "def time_step(self):\n\n self.reinitialize_backup_containers()\n\n super().time_step()\n\n self.make_a_backup_for_t()", "def rotate():\n if counter == counter.max - 1:\n rotator.next = concat(rotator[3:], rotator[3])\n counter.next = 0\n else:\n counter.next = counter + 1\n\n # set initial bits\n if rotator == 0:\n rotator.next = 1", "def resetTimeSinceLastIntegration(subarray=DEFAULT) :\n multiSubarray('resetTimeSinceLastIntegration', subarray)", "def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)", "def 
ab2_timestep(x, u, u_previous, timestep):\r\n return x + timestep * (1.5 * u - 0.5 * u_previous)", "def setTs(self, Ts):\r\n\t\tself.Ts = Ts\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def setTs(self, Ts):\r\n\t\tself.Ts = Ts\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def TimeRebase():\r\n\tdef init(self, start):\r\n\t\t\"\"\" Start channel is the rebase \"\"\"\r\n\t\tself.start = start\r\n\t\tself.time = 0\r\n\t\tself.t = []\r\n\t\tself.ch = []\r\n\r\n\tdef processPacket(self, data_size, data_packet, packet_mode = 'i64u'):\r\n\t\tt, ch = decodePacket(data_packet, data_size, packet_mode) # done in python\r\n\t\tnew_t = []\r\n\t\tnew_ch = []\r\n\t\tfor i in range(len(ch)):\r\n\t\t\tif ch[i] == self.start:\r\n\t\t\t\tself.time = t[i]\r\n\t\t\telse:\r\n\t\t\t\tnew_t.append(t[i]-self.time)\r\n\t\t\t\tnew_ch.append(ch[i])\r\n\t\tself.t.append(np.array(new_t, dtype = np.int64))\r\n\t\tself.ch.append(np.array(new_ch, dtype = np.int8))", "def round_trip_time(self):\n ...", "def shuffle(self): \n for x in range(12):\n self.right(primary=-60, counter=0)\n time.sleep(.1)\n self.left(primary=-60, counter=0)\n time.sleep(.1)\n self.stop()", "def tick(self):\n\n if self.seconds != 59:\n self.seconds += 1\n else:\n self.seconds = 0\n\n if self.minutes != 59:\n self.minutes += 1\n else:\n self.minutes = 0\n\n if self.hours != 23:\n self.hours += 1\n else:\n self.hours = 0", "def jump(self, seconds: float) -> None:\n if seconds < 0:\n raise ValueError(\"time can't go backwards\")\n self._virtual_base += seconds" ]
[ "0.6316349", "0.60978645", "0.590834", "0.5880278", "0.577192", "0.5754429", "0.56243247", "0.55874705", "0.5542577", "0.5534393", "0.5534393", "0.5491483", "0.5482042", "0.5442997", "0.5414095", "0.5402406", "0.5342204", "0.5330644", "0.5292622", "0.5289171", "0.5285309", "0.528143", "0.5278667", "0.5265445", "0.5265445", "0.526328", "0.52468115", "0.523017", "0.52292335", "0.5222209" ]
0.7543604
0
Used to set output file(s) speed. Expects ratio modifier, e.g. 1.3
def set_speed(self, ratio): self._speed = ratio
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_speed():\n pass", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)", "def set_speed(self,speed):\n self.speed_p = speed", "def set_speed(self,speed):\n self.speed = speed", "def set_speed(self,value):\n if (value>self.get_max_speed()):\n print \"asked to set the speed to %f but the max speed is %f\\n\" % (value,self.get_max_speed())\n else:\n return self.put_par(\"slew_speed\",value)", "def set_speed(self, speed):\r\n speed = float(speed)\r\n speed = int(round(speed * 27.7778))\r\n return self.send_command('speed %s' % speed)", "def set_speed(self, speed):\n self.speed = speed", "def set_speeds(self, speed_1, speed_2):\n pass", "def spit(self, speed=Constants.SPIT_SPEED):\n self.setPercentOutput(speed, -speed)", "def set_custom_speed(self, bytes_per_second):\n self._custom_speed = bytes_per_second", "def set_speed_manual(self, command_logger=None):\r\n pass", "def set_speed(self, speed):\n self._kernel.set_speed(float(speed))", "def set_speed(self, speed):\n self._set_sub_text('speed', text=str(speed))\n return self", "def on_speed_change(self, event) -> None:\r\n\r\n speed_level = int(self.speed_scale.get())\r\n self.animator.time_per_gen = self.TIMES_PER_GEN[speed_level]", "async def async_set_speed(self, value) -> None:\n await self.write_attributes_safe({\"fan_mode\": value})", "def set_multiplex_ratio(ratio):\n send_command(0xA8)\n send_command(ratio)", "def speed(self, value: int, /) -> None:", "def change_speed(self, action):\r\n if action == \"faster\":\r\n self.speed += 1\r\n else:\r\n if self.speed > 1:\r\n self.speed -= 1", "def setSpeedEngine4(speed: int):\n pass", "def speed(self, s=0):", "def setSpeedEngine1(speed: int):\n pass", "def set_samplerate(self, samplerate):\n\t\tnew_samplerate = _PM_UPDATE_RATE/min(max(1,samplerate),200)\n\t\tshift = min(math.ceil(math.log(new_samplerate,2)),16)\n\t\tself.output_decimation = 2**shift\n\t\tself.output_shift = shift\n\n\t\tprint \"Output decimation: %f, Shift: %f, Samplerate: %f\" % (self.output_decimation, shift, _PM_UPDATE_RATE/self.output_decimation)", "def increment_speed(self):\n self.speed += 0.0004", "def set_move_speed(cls, quad):\n\n\t\tspeed = cls.get_address_value(quad.result)\n\t\treturn speed/1000.0", "def setSpeedEngine2(speed: int):\n pass", "def set_fan_speed(self, value):\n self.parent.fancoolers.set_speed(value)", "def speed(self, speed):\n self._speed = speed\n self._rotspeed = speed", "def set_vratio(self, setto):\n command = 'vratio ' + str(setto)\n self.run_command(command)", "def _update_speed(self, speed):\n if speed is None:\n return\n if speed == self._current_speed:\n return\n\n self._current_speed = speed\n self._update_speed_attributes()\n LOG.info(\n f\"Updated LUNOS {self._name}: {self.percentage}% {self._current_speed}\"\n )", "def movespeed(self, speed):\n self._speed = speed" ]
[ "0.6482639", "0.64649606", "0.6284442", "0.6176265", "0.6143324", "0.6112721", "0.6111029", "0.609452", "0.59899086", "0.5989725", "0.5981956", "0.5947498", "0.593144", "0.5919195", "0.5886926", "0.58778495", "0.58514136", "0.5839829", "0.5813059", "0.5805993", "0.5775662", "0.5774861", "0.57718927", "0.5737287", "0.5732397", "0.57036173", "0.5701866", "0.56928474", "0.56884575", "0.5687532" ]
0.7200764
0
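The three records above (`set_part_length`, `set_backshift`, `set_speed`) are plain configuration setters; a hypothetical usage sketch is given below — the `LetMe` class name and its constructor are assumptions, only the setter names and argument meanings come from the records themselves:

    splitter = LetMe('/path/to/audiobook')  # assumed constructor, for illustration only
    splitter.set_part_length(600)           # 10-minute output parts
    splitter.set_backshift(15)              # each part starts 15 seconds back into the previous one
    splitter.set_speed(1.3)                 # 1.3x speed ratio for the output files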
Checks whether SoX is available.
def sox_check_is_available(self):
    result = self._process_command('sox -h', PIPE, supress_dry_run=True)
    return result[0] == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_available():", "def check_availability(self):\n pass", "def _check(self):\n\t\tif not self._raven:\n\t\t\traise NoDeviceFoundException", "def is_available(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def is_available(self) -> bool:\n raise NotImplementedError", "def available(self) -> bool:\n return True", "def available(self) -> bool:\n return True", "def _IsReady(self):\n if self.ip_address is None:\n self._GetIpAddress()\n if self.ip_address is not None:\n url = 'http://%s' % (self.ip_address)\n r = requests.get(url)\n if r.status_code == 200:\n return True\n return False", "def is_available() -> bool:\n # This function never throws and returns 0 if driver is missing or can't\n # be initialized\n return device_count() > 0", "def is_available(self):\n raise NotImplementedError", "def available(self):\n return True", "def available(self):\n return True", "def available(self):\n\t\t\treturn True", "def available(self):\n\t\t\treturn True", "def available(self):\n\t\t\treturn True", "def is_available(self):\n\n return not rospy.is_shutdown()", "def available(self) -> bool:\n if self.entity_description.always_available:\n return True\n return self.knx.xknx.connection_manager.state is XknxConnectionState.CONNECTED", "def is_available(self):\n try:\n dummy = pycurl.Curl()\n except (NameError, AttributeError):\n return False\n return True", "def isLicensed(self):\n try:\n if arcpy.CheckExtension(\"Spatial\") != \"Available\":\n raise Exception\n except Exception:\n return False # tool cannot be executed \n return True", "def isLicensed(self):\n try:\n if arcpy.CheckExtension(\"Spatial\") != \"Available\":\n raise Exception\n except Exception:\n return False # tool cannot be executed \n return True", "def isLicensed(self):\n try:\n if arcpy.CheckExtension(\"Spatial\") != \"Available\":\n raise Exception\n except Exception:\n return False # tool cannot be executed \n return True", "def isLicensed(self):\n try:\n if arcpy.CheckExtension(\"Spatial\") != \"Available\":\n raise Exception\n except Exception:\n return False # tool cannot be executed \n return True", "def isLicensed(self):\n try:\n if arcpy.CheckExtension(\"Spatial\") != \"Available\":\n raise Exception\n except Exception:\n return False # tool cannot be executed \n return True", "def isLicensed(self):\n try:\n if arcpy.CheckExtension(\"Spatial\") != \"Available\":\n raise Exception\n except Exception:\n return False # tool cannot be executed \n return True", "def isLicensed(self):\n try:\n if arcpy.CheckExtension(\"Spatial\") != \"Available\":\n raise Exception\n except Exception:\n return False # tool cannot be executed \n return True", "def isLicensed(self):\n try:\n if arcpy.CheckExtension(\"Spatial\") != \"Available\":\n raise Exception\n except Exception:\n return False # tool cannot be executed \n return True", "def is_setup(self):\n return self._market_data_sock_info.ready.is_set() and \\\n self._orders_sock_info.ready.is_set()" ]
[ "0.7127908", "0.70180845", "0.65250325", "0.6443219", "0.64104086", "0.64104086", "0.64104086", "0.6391573", "0.6366981", "0.6366981", "0.63340026", "0.63170475", "0.63147825", "0.62995595", "0.62995595", "0.6297277", "0.6297277", "0.6297277", "0.6264491", "0.61891913", "0.61452484", "0.61425257", "0.61425257", "0.61425257", "0.61425257", "0.61425257", "0.61425257", "0.61425257", "0.61425257", "0.6139797" ]
0.87171984
0
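A minimal standalone equivalent of the check above, assuming only the Python standard library (`_process_command` is a project helper that is not shown in this record); like the original, it treats a zero exit status from `sox -h` as proof that SoX is usable:

    import shutil
    import subprocess

    def sox_is_available():
        # Fast path: no `sox` executable on PATH at all.
        if shutil.which('sox') is None:
            return False
        # Mirror the original check: `sox -h` should run and exit with status 0.
        result = subprocess.run(['sox', '-h'],
                                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        return result.returncode == 0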
Asks SoX for supported audio file formats and returns them as a list.
def sox_get_supported_formats(self):
    formats = ['wav']
    result = self._process_command('sox -h', PIPE, supress_dry_run=True)
    matches = re.findall(RE_SOX_AUDIO_SUPPORT, result[1][0])
    if matches is not None:
        formats = matches[0].strip().split(' ')
    logging.debug('Sox supported audio formats: %s', formats)
    return formats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAudioFileFromFilelist(audiofiltered):\n for audioFile in audiofiltered:\n audioRoot, audioExt = os.path.splitext(audioFile)\n if audioExt in ['.wav', '.aiff', '.aif']:\n return audioFile", "def getSupportedFileFormats():\n return {\"Bitmap\":[\"*.bmp\", \"*.dib\"], \"JPEG\": [\"*.jpeg\", \"*.jpg\", \"*.jpe\"], \"JPEG 2000\": [\"*.jp2\"],\"Portable Network Graphics\" : [\"*.png\"], \"WebP\": [\"*.webp\"], \"Portable Image Formats\":[\"*.pbm\", \"*.pgm\", \"*.ppm\"], \"Sun Rasters\":[\"*.sr\", \"*.ras\"], \"TIFF Files\": [\"*.tiff\",\"*.tif\"] }", "async def audiofiles(self, ctx):\r\n files = '\"{0}\"'.format('\", \"'.join(self.audio_files))\r\n await ctx.send(\"```Available audio files :\\n{0}```\".format(files))", "def filter_target_extensions(self, files_dict):\n files_filtered = defaultdict(list)\n supported_formats = self.sox_get_supported_formats()\n logging.info('Filtering audio files ...')\n paths = list(files_dict.keys())\n\n for path in paths:\n if not path.endswith('letmehear'):\n files = sorted(files_dict[path])\n for f in files:\n if os.path.splitext(f)[1].lstrip('.').lower() in supported_formats:\n files_filtered[path].append(f)\n return files_filtered", "def getAudioFiles(directory):\n\n # Fetch list of files in selected directory\n fileList = os.listdir(directory)\n fileList.sort()\n\n # Create Audio objects\n audioList = []\n for f in fileList:\n if f.endswith('.wav'):\n audioList.append(Audio(directory, f))\n\n return audioList", "def available_input_formats():\n input_formats = []\n # Extensions.\n for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n try:\n input_formats.append(v.load().InputData.METADATA[\"driver_name\"])\n except ImportError:\n raise\n except Exception:\n pass\n return input_formats", "def OpenSoundFiles():\n\n wildcard = create_wildcard(\"Sound files\", sppas.src.audiodata.aio.extensionsul)\n wildcard += '|' + create_wildcard(\"All files\", ['*', '*.*'])\n\n files = list()\n dlg = wx.FileDialog(None, \n \"Select sound file(s)\", \n paths.samples, \n \"\", \n wildcard, \n wx.FD_OPEN | wx.MULTIPLE | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n files = dlg.GetPaths()\n\n dlg.Destroy()\n\n return files", "def filterAudioFilesFromFilelist(filelist):\n audioFileList = []\n for audioFilter in filelist:\n audioRoot, audioExt = os.path.splitext(audioFilter)\n if audioExt in ['.wav', '.aiff', '.aif']:\n audioFileList.append(audioFilter)\n # end for loop\n return audioFileList", "def get_supported_extensions(ext=\".as\"):\n result = list(ext + x for x in LOADERS.keys())\n result.append(ext)\n return result", "def get_import_formats(self):\n return [f for f in self.formats if f().can_import()]", "def test_available_input_formats():\n assert set([\"Mapchete\", \"raster_file\", \"vector_file\"]).issubset(\n set(available_input_formats())\n )", "def SupportedFiletypes( self ):\n return ['plaintex', 'tex']", "def SupportedFiletypes( self ):\n return ['plaintex', 'tex']", "def available_formats() -> List[str]:\n formats = [p.stem for p in Path(TEMPLATES_PATH).glob(f'*{TEMPLATE_SUFFIX}')]\n return formats", "def audio_format(self):\n return self.__audio_format", "def testGetAllowedConversionFormatList(self):\n get = Handler.getAllowedConversionFormatList\n # Handled mimetypes\n self.assertEquals(get(\"text/html;ignored=param\"),\n [(\"application/pdf\", \"PDF - Portable Document Format\")])\n\n # Unhandled mimetypes\n self.assertEquals(get(\"application/pdf;ignored=param\"), [])", "def load_all_sfx(directory, accept=(\".wav\", \".mp3\", 
\".ogg\", \".mdi\")):\n effects = {}\n for fx in os.listdir(directory):\n name, ext = os.path.splitext(fx)\n if ext.lower() in accept:\n effects[name] = pg.mixer.Sound(os.path.join(directory, fx))\n return effects", "def test_audio_extension(filename):\n\t\n\t# We need to make a test for extention :\n\timport os\n\textension = os.path.splitext(filename)[1]\n\text_ok=['.aif','.aifc','.aiff','.au','.cda','.m4a','.m4b','.m4p','.mid','.midi','.mka','.mkv','.mod','.mp1','.mp2','.mp3','.mpa','.nst','.odm','.ogg','.ptm','.ra','.s3m','.snd','.wav','.wma','.wow','.xm','.AIF','.AIFC','.AIFF','.AU','.CDA','.M4A','.M4B','.M4P','.MID','.MIDI','.MKA','.MKV','.MOD','.MP1','.MP2','.MP3','.MPA','.NST','.ODM','.OGG','.PTM','.RA','.S3M','.SND','.WAV','.WMA','.WOW','.XM']\n\tif extension in ext_ok:\n\t\treturn True\n\telse:\n\t\treturn False", "def get_music_files(pth: pathlib.Path) -> typing.List[mutagen.FileType]:\n file_names = [os.path.join(pth, f) for f in os.listdir(pth)]\n files = [mutagen.File(f) for f in file_names if os.path.isfile(f)]\n return [f for f in files if f is not None]", "def formats(self):\n logger.debug(\"Get formats\")\n return self._raw_api.formats.get()", "def load_audio(path):\r\n if path[-4:] == \".wav\":\r\n fs, data = load_wav(path)\r\n\r\n elif path[-4:] == \".mp3\":\r\n fs, data = load_mp3(path)\r\n\r\n else:\r\n raise ValueError(\"Wrong file format, use mp3 or wav\")\r\n\r\n return fs, data", "def get_audioinput_plugins(self):\r\n unfiltered_plugins = self.plugmanc.getPluginsOfCategory(\"AudioInput\")\r\n return self._get_supported_plugins(unfiltered_plugins)", "def load_all_music(directory, accept=(\".wav\", \".mp3\", \".ogg\", \".mdi\")):\n songs = {}\n for song in os.listdir(directory):\n name,ext = os.path.splitext(song)\n if ext.lower() in accept:\n songs[name] = os.path.join(directory, song)\n return songs", "def load_all_music(directory, accept=(\".wav\", \".mp3\", \".ogg\", \".mdi\")):\n songs = {}\n for song in os.listdir(directory):\n name, ext = os.path.splitext(song)\n if ext.lower() in accept:\n songs[name] = os.path.join(directory, song)\n return songs", "def list_files_to_convert():\n for root, dirs, files in os.walk(video_dir):\n file_list = [name for name in files if not name.endswith('.mp3')]\n for name in file_list:\n filepath = os.path.join(root, name)\n media_info = MediaInfo.parse(filepath, library_file=dll_path)\n for track in media_info.tracks:\n if 'Audio' in track.track_type:\n # print(track.track_type, track.bit_rate)\n # print(filepath, \"Is an Audio/Video file, and should be converted because a sound track is found\")\n yield dict(path=filepath, info=media_info)", "def list_example_files():\n candidate_fns = os.listdir(data_dir())\n exts = ('.bed', '.gff', '.gtf', '.bed.gz', '.bam', '.gff.gz')\n valid_fns = [f for f in candidate_fns if f.endswith(exts)]\n return sorted(valid_fns)", "def formats():\n if PIL_ENABLED:\n return 'BMP', 'EPS', 'GIF', 'JPEG', 'MSP', 'PCX', 'PNG', 'SVG', 'TIFF', 'XBM'\n else:\n return 'EPS', 'SVG'", "def process_audio(fname, output_dir, poller):\n result = []\n try:\n if poller.params.candidate_transcripts is not None:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".json\")\n else:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".txt\")\n audio, audio_length = load_audio(fname, poller.params.model.sampleRate())\n pred = transcribe_audio(poller.params.model, audio, candidate_transcripts=poller.params.candidate_transcripts)\n with 
open(out_path, \"w\") as fp:\n fp.write(pred)\n result.append(out_path)\n except KeyboardInterrupt:\n poller.keyboard_interrupt()\n except:\n poller.error(\"Failed to process audio file: %s\\n%s\" % (fname, traceback.format_exc()))\n return result", "def load_all_audios(\n df: pd.DataFrame,\n *,\n target_sample_rate: int = None,\n mono: bool = constants.STEREO_TO_MONO_DEFAULT.value\n) -> Tuple[List[int], List[np.ndarray]]:\n file_list = list(df[\"audio_file_path\"])\n\n # audios is a list of (rate: int, data: np.ndarray)\n audios = load_multiple_wav(file_list)\n rate = [i[0] for i in audios]\n data = [i[1] for i in audios]\n\n # Convert to mono if needed\n if mono:\n data = p_map(stereo_to_mono, data, desc=\"Converting to mono...\")\n\n # Resample if needed\n if target_sample_rate is not None:\n data = p_map(resample, data, rate, [\n target_sample_rate for _ in data], desc=\"Resampling...\")\n rate = [target_sample_rate for _ in data]\n\n return rate, data", "def test_available_output_formats():\n assert set([\"GTiff\", \"PNG\", \"PNG_hillshade\", \"GeoJSON\"]).issubset(\n set(available_output_formats())\n )" ]
[ "0.66873074", "0.6548086", "0.6411624", "0.6321018", "0.63118595", "0.6293166", "0.6282144", "0.6252007", "0.61258864", "0.61227095", "0.60537213", "0.6052438", "0.6052438", "0.60509884", "0.6035291", "0.5974638", "0.58423185", "0.58144164", "0.578201", "0.575945", "0.56855935", "0.56457186", "0.5637037", "0.56359756", "0.56219476", "0.56048113", "0.55854607", "0.55284864", "0.54968226", "0.54784775" ]
0.8203795
0
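The `RE_SOX_AUDIO_SUPPORT` pattern used above is defined elsewhere in the project and is not shown in this record; the sketch below uses an assumed pattern that matches the `AUDIO FILE FORMATS:` line printed by recent SoX builds in their `-h` output, and falls back to `wav` just like the original:

    import re
    import subprocess

    def sox_supported_formats():
        output = subprocess.run(['sox', '-h'], stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, text=True).stdout
        match = re.search(r'AUDIO FILE FORMATS:\s*(.+)', output)
        if match is None:
            return ['wav']
        return match.group(1).strip().split()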
Calculates maximum and minimum sample rates for the given files. Returns a tuple (min_rate, max_rate, files_to_rates_dict).
def sox_get_sample_rates(self, files):
    min_rate = max_rate = None
    files_to_rates_dict = {}
    for f in files:
        result = self._process_command('soxi -r "%s"' % f, PIPE)
        try:
            rate = int(result[1][0].strip('\n'))
        except ValueError:
            raise LetMeError('Unable to read sample rate from %s' % f)
        files_to_rates_dict[f] = rate
        logging.debug('\nSample rate `%s` for `%s`', rate, f)
        if max_rate is None or rate > max_rate:
            max_rate = rate
        if min_rate is None or rate < min_rate:
            min_rate = rate
    logging.debug('Sample rates: min - %s, max - %s', min_rate, max_rate)
    return min_rate, max_rate, files_to_rates_dict
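For reference, a bare-bones sketch of the per-file probe performed above — `soxi -r FILE` prints the file's sample rate as a single integer on stdout (the helper name is illustrative):

    import subprocess

    def read_sample_rate(path):
        out = subprocess.run(['soxi', '-r', path],
                             stdout=subprocess.PIPE, text=True).stdout
        return int(out.strip())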
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sample_rate_according_filename(filename: str, sample_rates: Dict[str, int]) -> int:\n if filename.split('_')[-1] in ('right', 'left'):\n filename = filename.split('_')[0]\n return sample_rates[filename]", "def load_sample_rates(path: str) -> Dict[str, int]:\n sample_rates = pd.read_csv(path)\n result_dict = {x['filename'].split('.')[0]: x['frame_rate'] for index, x in sample_rates.iterrows()}\n return result_dict", "def find_time_limits(files):\n tmin=0\n tmax=2**60\n for f in files:\n arr=np.load(f, mmap_mode=\"r\")[:,1]\n tmin=max(tmin, np.min(arr))\n tmax=min(tmax, np.max(arr))\n return tmin,tmax", "def get_exact_sampling_rate(path_to_dir):\n\n # Test whether there is only one *.1 file in the directory.\n assert len(glob.glob(path_to_dir + \"*.1\")) == 1\n\n # Open *.1 file and get exact sampling rate.\n with open(glob.glob(path_to_dir + \"*.1\")[0], \"rb\") as binary_file:\n power_of_two = 64\n binary_file.seek(490 + (89 * power_of_two))\n couple_bytes = binary_file.read(8)\n sampling_rate = struct.unpack('d', couple_bytes)\n print(sampling_rate[0])\n assert 100 <= sampling_rate[0] <= 1024, 'Sampling rate funny. try different power_of_two, can be either 32 or 64'\n return sampling_rate[0]", "def sample_rate(self):\n\n properties_file = open(self.scenario_path + \"/conf/sandag_abm.properties\", \"r\")\n rate = None\n\n for line in properties_file:\n # strip all white space from the line\n line = line.replace(\" \", \"\")\n\n # find line containing \"sample_rates=\"\n m = re.compile(\"sample_rates=\").match(line)\n if m:\n # take the portion of the line after the matching string\n # and split by the comma character\n line = line[m.end():].split(\",\")\n\n # if the split line contains a single element return that element\n # otherwise return the final element\n if len(line) == 0:\n rate = float(line[0])\n else:\n rate = float(line[-1])\n break\n\n properties_file.close()\n\n return rate", "def bands(self):\n\t\treturn zip((self.primary_threshold, self.upper_earning_limit),\n\t\t\t\t self.rates)", "def find_maximum_and_minimum(file_name: str) -> Tuple[int, int]:\n min_number = max_number = None\n with open(file=file_name, mode=\"tr\") as file:\n\n for line in file:\n number = int(line)\n\n if min_number is None:\n min_number = max_number = number\n\n elif number > max_number:\n max_number = number\n\n elif number < min_number:\n min_number = number\n\n return min_number, max_number", "def new_ratio_max_min(metric_id_to_list_of_values: Dict[iter8id, Iterable[float]]):\n max_min_lists = {\n metric_id: [None, None] for metric_id in metric_id_to_list_of_values\n }\n logger.debug(\"mert metricid\")\n logger.debug(metric_id_to_list_of_values)\n for metric_id in metric_id_to_list_of_values:\n try:\n max_min_lists[metric_id][0], max_min_lists[metric_id][1] = min(metric_id_to_list_of_values[metric_id]), max(metric_id_to_list_of_values[metric_id])\n except:\n logger.debug(\"Empty list of values found for metric %s\", metric_id)\n \n max_min_lists[metric_id] = RatioMaxMin(\n minimum = max_min_lists[metric_id][0],\n maximum = max_min_lists[metric_id][1]\n )\n \"\"\"if the list of values is empty for a metric id, return None values for max and min\n \"\"\"\n \n return max_min_lists", "def filesample(filename):\n sampling_rate, samples = wavfile.read(filename)\n times = np.arange(len(samples)) / sampling_rate\n return samples, sampling_rate", "def get_rates(self, max_time=None, **kw):\n end_time = kw.pop('end_time', time.time())\n start_time = kw.pop('start_time', self.creation_time)\n if 
max_time:\n start_time = end_time - max_time\n ret = {}\n all_loggers_rate = 0.0\n for logger, name_map in self.acc_map.items():\n cur_logger_rate = 0.0\n ret[logger.name] = {}\n for name, status_map in name_map.items():\n cur_name_rate = 0.0\n ret[logger.name][name] = {}\n for status, acc in status_map.items():\n cur_rate = acc.get_rate(start_time=start_time,\n end_time=end_time)\n ret[logger.name][name][status] = cur_rate\n cur_name_rate += cur_rate\n cur_logger_rate += cur_rate\n all_loggers_rate += cur_rate\n ret[logger.name][name]['__all__'] = cur_name_rate\n ret[logger.name]['__all__'] = cur_logger_rate\n ret['__all__'] = all_loggers_rate\n return ret", "def __init__(self, avg_rate, max_rate):\n self.average = avg_rate\n self.maximum = max_rate", "def compute_metrics_from_files(p_path_to_reference_file,\r\n p_path_to_candidate_file,\r\n p_max_bleu_order):\r\n\r\n reference_dictionary, reference_no_answer_query_ids = \\\r\n load_file(p_path_to_reference_file)\r\n candidate_dictionary, candidate_no_answer_query_ids = load_file(p_path_to_candidate_file)\r\n query_id_answerable = set(reference_dictionary.keys())-reference_no_answer_query_ids\r\n query_id_answerable_candidate = set(candidate_dictionary.keys())-candidate_no_answer_query_ids\r\n \r\n true_positives = len(query_id_answerable_candidate.intersection(query_id_answerable))\r\n false_negatives = len(query_id_answerable)-true_positives\r\n true_negatives = len(candidate_no_answer_query_ids.intersection(reference_no_answer_query_ids))\r\n false_positives = len(reference_no_answer_query_ids)-true_negatives\r\n precision = float(true_positives)/(true_positives+false_positives) if (true_positives+false_positives)>0 else 1.\r\n recall = float(true_positives)/(true_positives+false_negatives) if (true_positives+false_negatives)>0 else 1.\r\n F1 = 2 *((precision*recall)/(precision+recall))\r\n filtered_reference_dictionary = \\\r\n {key: value for key, value in reference_dictionary.items() \\\r\n if key not in reference_no_answer_query_ids}\r\n\r\n filtered_candidate_dictionary = \\\r\n {key: value for key, value in candidate_dictionary.items() \\\r\n if key not in reference_no_answer_query_ids}\r\n\r\n for query_id, answers in filtered_candidate_dictionary.items():\r\n assert \\\r\n len(answers) <= 1, \\\r\n 'query_id %d contains more than 1 answer \\\"%s\\\" in candidate file' % \\\r\n (query_id, str(answers))\r\n\r\n reference_query_ids = set(filtered_reference_dictionary.keys())\r\n candidate_query_ids = set(filtered_candidate_dictionary.keys())\r\n common_query_ids = reference_query_ids.intersection(candidate_query_ids)\r\n assert (len(common_query_ids) == len(reference_query_ids)) and \\\r\n (len(common_query_ids) == len(candidate_query_ids)), \\\r\n 'Reference and candidate files must share same query ids'\r\n\r\n all_scores = {}\r\n bleu_scores, _ = \\\r\n Bleu(p_max_bleu_order).compute_score(filtered_reference_dictionary, \\\r\n filtered_candidate_dictionary)\r\n for i, bleu_score in enumerate(bleu_scores):\r\n all_scores['bleu_%d' % (i+1)] = bleu_score\r\n\r\n rouge_score, _ = Rouge().compute_score(filtered_reference_dictionary, \\\r\n filtered_candidate_dictionary)\r\n all_scores['rouge_l'] = rouge_score\r\n all_scores['F1'] = F1\r\n similarity = 0\r\n for key in filtered_reference_dictionary:\r\n candidate_answer = nlp(filtered_candidate_dictionary[key][0])\r\n reference_answer = filtered_reference_dictionary[key]\r\n answersimilarity = 0\r\n for answer in reference_answer:\r\n answersimilarity += 
candidate_answer.similarity(nlp(answer))\r\n similarity += answersimilarity/len(reference_answer)\r\n semantic_similarity = similarity/len(filtered_reference_dictionary)\r\n all_scores['Semantic_Similarity'] = semantic_similarity\r\n return all_scores", "def calc_flow_rates(self, flow_pkts):\n flow_rates = {}\n for flowID, pkts in flow_pkts.items():\n prev_time = pkts[0][0]\n byte_cnt = 0\n flow_rates[flowID] = []\n for (cur_time, pkt) in pkts:\n if cur_time <= prev_time + self.avg_interval:\n # increment\n byte_cnt += pkt.length\n else:\n# # insert 0 samples if needed\n# for t in range(prev_time, cur_time, self.avg_interval)[0:-2]:\n# avg_time = (t + self.avg_interval/2.0)\n# flow_rates[flowID].append((avg_time, 0))\n# prev_time = t + self.avg_interval\n # update\n interval = cur_time - prev_time # ns\n rate = (byte_cnt*8.0)/float(interval) # Gbps\n avg_time = (cur_time + prev_time)/2.0\n flow_rates[flowID].append((avg_time, rate))\n # reset\n prev_time = cur_time\n byte_cnt = 0\n return flow_rates", "def getConvRates(self, from_rates, to_rates):\n\n conv_rates = list()\n try:\n for i in range(len(from_rates)):\n conv_rates.append(to_rates[i] / from_rates[i] if from_rates[i] != 0 else 0)\n except Exception as e:\n print('Could not calculate the conversion rate')\n print(e)\n\n return conv_rates", "def find_maximum_and_minimum(file_name: str) -> Tuple[int, int]:\n with open(file_name) as fi:\n min = int(fi.readline())\n max = min\n for line in fi:\n line = int(line.rstrip('\\n')) # удаление переноса строки\n if min >= line: # и перевод оставшейся части в число\n min = line\n elif max <= line:\n max = line\n return min, max", "def parse_age_rates(self, filename, factor, final):\n\n dat = load_age_rates(filename)\n rates = {}\n for line in dat:\n rates[line[0]] = [x * factor for x in line[1:]]\n if final:\n rates[101] = [100 for x in dat[0][1:]] # everybody dies...\n return rates", "def calc_flow_rates(self, flow_pkts):\n flow_rates = {}\n for flowID, pkts in flow_pkts.items():\n prev_time = pkts[0][0]\n byte_cnt = 0\n flow_rates[flowID] = []\n for (cur_time, pkt) in pkts:\n if cur_time <= prev_time + self.avg_interval:\n # increment\n byte_cnt += len(pkt)\n else:\n # insert 0 samples if needed\n for t in range(prev_time, cur_time, self.avg_interval)[0:-2]:\n avg_time = (t + self.avg_interval/2.0)\n flow_rates[flowID].append((avg_time, 0))\n prev_time = t + self.avg_interval\n # update\n interval = cur_time - prev_time # ns\n rate = (byte_cnt*8.0)/float(interval) # Gbps\n avg_time = (cur_time + prev_time)/2.0\n flow_rates[flowID].append((avg_time, rate))\n # reset\n prev_time = cur_time\n byte_cnt = 0\n return flow_rates", "def FileBetter(file_name_1, file_name_2, metric_column, method):\n # Store and parse our two files into lists of unique tuples.\n\n # Read the two files, parsing out lines starting with bitrate.\n metric_set1_sorted = ParseMetricFile(file_name_1, metric_column)\n metric_set2_sorted = ParseMetricFile(file_name_2, metric_column)\n\n\n def GraphBetter(metric_set1_sorted, metric_set2_sorted, base_is_set_2):\n \"\"\"\n Search through the sorted metric file for metrics on either side of\n the metric from file 1. 
Since both lists are sorted we really\n should not have to search through the entire range, but these\n are small files.\"\"\"\n total_bitrate_difference_ratio = 0.0\n count = 0\n for bitrate, metric in metric_set1_sorted:\n for i in range(len(metric_set2_sorted) - 1):\n s2_bitrate_0, s2_metric_0 = metric_set2_sorted[i]\n s2_bitrate_1, s2_metric_1 = metric_set2_sorted[i + 1]\n # We have a point on either side of our metric range.\n if metric > s2_metric_0 and metric <= s2_metric_1:\n\n # Calculate a slope.\n if s2_metric_1 - s2_metric_0 != 0:\n metric_slope = ((s2_bitrate_1 - s2_bitrate_0) /\n (s2_metric_1 - s2_metric_0))\n else:\n metric_slope = 0\n\n estimated_s2_bitrate = (s2_bitrate_0 + (metric - s2_metric_0) *\n metric_slope)\n\n # Calculate percentage difference as given by base.\n if base_is_set_2 == 0:\n bitrate_difference_ratio = ((bitrate - estimated_s2_bitrate) /\n bitrate)\n else:\n bitrate_difference_ratio = ((bitrate - estimated_s2_bitrate) /\n estimated_s2_bitrate)\n\n total_bitrate_difference_ratio += bitrate_difference_ratio\n count += 1\n break\n\n # Calculate the average improvement between graphs.\n if count != 0:\n avg = total_bitrate_difference_ratio / count\n\n else:\n avg = 0.0\n\n return avg\n\n # Be fair to both graphs by testing all the points in each.\n if method == 'avg':\n avg_improvement = 50 * (\n GraphBetter(metric_set1_sorted, metric_set2_sorted, 1) -\n GraphBetter(metric_set2_sorted, metric_set1_sorted, 0))\n elif method == 'dsnr':\n avg_improvement = bdsnr(metric_set1_sorted, metric_set2_sorted)\n else:\n avg_improvement = bdrate(metric_set2_sorted, metric_set1_sorted)\n\n return avg_improvement", "def parseRatios( self, ratios_files ):\n\t\t## unnecessary at the moment since each of the standardized ratios are different at the moment\n\t\t# i = 0\n\t\t# while i < len(ratios_files):\n\t\t# \tif i == 0:\n\t\t# \t\trats_df = pd.read_csv( gzip.open( ratios_files[i], 'rb' ), index_col=0, sep=\"\\t\" ) \n\t\t# \telse:\n\t\t# \t\tnew_df = pd.read_csv( gzip.open( ratios_files[i], 'rb' ), index_col=0, sep=\"\\t\" )\n\t\t# \t\trats_df = rats_df + new_df\n\t\t# \ti = i+1 ", "def getRatesInRange(self, currency_rates):\n\n rates = list()\n try:\n date = self.from_date\n while date <= self.to_date:\n rates.append(float(self.getMostRecentRelevantRate(currency_rates, date)))\n date += timedelta(days=1)\n except Exception as e:\n print('Could not retrieve rates')\n print(e)\n\n rates.reverse()\n return rates", "def parsefilenames(filenames):\n\n sequence_counts = collections.defaultdict(int)\n sequences_by_filenames = collections.defaultdict(list)\n\n for filename in filenames:\n\n for sequence, priority, frameno in extractsequences(filename):\n sequence_counts[sequence] += 1\n sequences_by_filenames[filename].append((sequence, priority, frameno))\n\n sequences = collections.defaultdict(Range)\n\n for filename, filesequences in sequences_by_filenames.items():\n (sequence, _, frameno) = max(filesequences, key=lambda s_p_f: (sequence_counts[s_p_f[0]], s_p_f[1]))\n sequences[sequence].add(frameno)\n \n return sorted(sequences.items())", "def tran_rate_cal():\n avg_result = {\n \"default\": [0, 0, 0, 0],\n \"node_amount\": [0, 0, 0, 0],\n \"tran_range\": [0, 0, 0, 0],\n \"ttl\": [0, 0, 0, 0]\n }\n\n inventory_path = [\"result/TranRate_default.xls\", \"result/TranRate_node.xls\", \"result/TranRate_tranrange.xls\",\\\n \"result/TranRate_ttl.xls\"]\n dict_key = [\"default\", \"node_amount\", \"tran_range\", \"ttl\"]\n\n for i in range(len(inventory_path)):\n time_use = 
list()\n total_infect = list()\n max_infect = list()\n loc = (inventory_path[i])\n w = xlrd.open_workbook(loc)\n sheet = w.sheet_by_index(0)\n sheet.cell_value(0, 0)\n for row in range(1, sheet.nrows):\n time_use.append(int(sheet.cell_value(row, 0)))\n total_infect.append(int(sheet.cell_value(row, 1)))\n max_infect.append(int(sheet.cell_value(row, 2)))\n\n average_time_use = sum(time_use)/len(time_use)\n average_total_infect = sum(total_infect)/len(total_infect)\n average_max_infect = sum(max_infect)/len(max_infect)\n\n avg_result[dict_key[i]][0] = average_total_infect\n avg_result[dict_key[i]][1] = average_time_use\n avg_result[dict_key[i]][2] = average_total_infect/average_time_use\n avg_result[dict_key[i]][3] = average_max_infect\n\n return avg_result", "def get_samp_rates(self):\n return _uhd_swig.usrp_source_get_samp_rates(self)", "def update(self, runningrates, rspec):\n # cache share for later comparison\n runningrates['share'] = self.Share\n\n # Query Node Manager for max rate overrides\n self.updateSliceTags(rspec)\n\n usedbytes = runningrates['usedbytes']\n usedi2bytes = runningrates['usedi2bytes']\n\n # Check limits.\n if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):\n sum = self.bytes + (self.ThreshKByte * 1024)\n maxbyte = self.MaxKByte * 1024\n bytesused = usedbytes - self.bytes\n timeused = int(time.time() - self.time)\n # Calcuate new rate. in bit/s\n new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))\n # Never go under MinRate\n if new_maxrate < (self.MinRate * 1000):\n new_maxrate = self.MinRate * 1000\n # State information. I'm capped.\n self.capped += True\n else:\n # Sanity Check\n new_maxrate = self.MaxRate * 1000\n self.capped += False\n\n if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):\n maxi2byte = self.Maxi2KByte * 1024\n i2bytesused = usedi2bytes - self.i2bytes\n timeused = int(time.time() - self.time)\n # Calcuate New Rate.\n new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)/(period - timeused))\n # Never go under MinRate\n if new_maxi2rate < (self.Mini2Rate * 1000):\n new_maxi2rate = self.Mini2Rate * 1000\n # State information. 
I'm capped.\n self.capped += True\n else:\n # Sanity\n new_maxi2rate = self.Maxi2Rate * 1000\n self.capped += False\n\n # Check running values against newly calculated values so as not to run tc\n # unnecessarily\n if (runningrates['maxrate'] != new_maxrate) or \\\n (runningrates['minrate'] != self.MinRate * 1000) or \\\n (runningrates['maxexemptrate'] != new_maxi2rate) or \\\n ('minexemptrate' in runningrates and runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \\\n (runningrates['share'] != self.Share):\n # Apply parameters\n bwlimit.set(xid = self.xid, dev = dev_default,\n minrate = self.MinRate * 1000,\n maxrate = new_maxrate,\n minexemptrate = self.Mini2Rate * 1000,\n maxexemptrate = new_maxi2rate,\n share = self.Share)\n\n # Notify slice\n if self.capped == True:\n self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)", "def getDataFilesForSamples(\n self, startSample: int, endSample: int\n ) -> Tuple[List[str], List[List[int]], List[float]]:\n # have the datafiles saved in sample order beginning with the earliest first\n # go through each datafile and find the range to be read\n dataFilesToRead = []\n samplesToRead = []\n scalings = []\n for idx, dFile in enumerate(self.dataFileList):\n fileStartSamp = self.dataRanges[idx][0]\n fileEndSamp = self.dataRanges[idx][1]\n if fileStartSamp > endSample or fileEndSamp < startSample:\n continue # nothing to read from this file\n # in this case, there is some overlap with the samples to read\n dataFilesToRead.append(dFile)\n readFrom = 0 # i.e. the first sample in the datafile\n readTo = fileEndSamp - fileStartSamp # this the last sample in the file\n if fileStartSamp < startSample:\n readFrom = startSample - fileStartSamp\n if fileEndSamp > endSample:\n readTo = endSample - fileStartSamp\n # this is an inclusive number readFrom to readTo including readTo\n samplesToRead.append([readFrom, readTo])\n scalings.append(self.scalings[idx])\n return dataFilesToRead, samplesToRead, scalings", "def read_csvs(files_to_read: Dict):\n res = {team: {} for team in TEAM_MAP[CFD]}\n for k, v in files_to_read.items():\n rating_system, file_data = get_csv_data_for_path(v)\n team_name_map_for_rating_system = TEAM_MAP[rating_system]\n for row in file_data:\n team, rtg = row[:2]\n standardized_team_name = team_name_map_for_rating_system[team][CFD]\n res[standardized_team_name].update({rating_system: float(rtg)})\n\n return res", "def get_rate_limits():\n client = get_rates_api()\n\n with catch_raise_api_exception():\n data, _, headers = client.rates_limits_list_with_http_info()\n\n ratelimits.maybe_rate_limit(client, headers)\n\n return {\n k: RateLimitsInfo.from_dict(v)\n for k, v in six.iteritems(data.to_dict().get(\"resources\", {}))\n }", "def processRatings(file):\n ratingsPerSubmission = {}\n with open(file) as ratings_file:\n next(ratings_file)\n for line in ratings_file:\n regexRes = re.search(\"^([a-z0-9]{24}),([0-4]),([0-4]),([0-4]),(.*)$\", line)\n if regexRes:\n if not regexRes.group(1) in ratingsPerSubmission:\n ratingsPerSubmission[regexRes.group(1)] = {\"DQual\": [], \"DIntrp\": [], \"TDepth-subtopic1\": 0,\n \"TDepth-subtopic2\": 0, \"TDepth-subtopic3\": 0, \"TDepth-subtopic4\": 0}\n if regexRes.group(2) != '0':\n ratingsPerSubmission[regexRes.group(1)]['TDepth-subtopic' + regexRes.group(2)] += 1\n ratingsPerSubmission[regexRes.group(1)]['DQual'].append(int(regexRes.group(3)))\n ratingsPerSubmission[regexRes.group(1)]['DIntrp'].append(int(regexRes.group(4)))\n\n # Calculate T-Depth\n for (submission, ratings) in 
ratingsPerSubmission.items():\n ratingsPerSubmission[submission]['TDepth'] = avg([min(3, ratings[\"TDepth-subtopic\" + str(i)]) for i in range(1, 5)])\n\n return ratingsPerSubmission", "def bdsnr(metric_set1, metric_set2):\n rate1 = [x[0] for x in metric_set1]\n psnr1 = [x[1] for x in metric_set1]\n rate2 = [x[0] for x in metric_set2]\n psnr2 = [x[1] for x in metric_set2]\n\n log_rate1 = map(lambda x: math.log(x), rate1)\n log_rate2 = map(lambda x: math.log(x), rate2)\n\n # Best cubic poly fit for graph represented by log_ratex, psrn_x.\n p1 = numpy.polyfit(log_rate1, psnr1, 3)\n p2 = numpy.polyfit(log_rate2, psnr2, 3)\n\n # Integration interval.\n min_int = max([min(log_rate1),min(log_rate2)])\n max_int = min([max(log_rate1),max(log_rate2)])\n\n # Integrate p1, and p2.\n p_int1 = numpy.polyint(p1)\n p_int2 = numpy.polyint(p2)\n\n # Calculate the integrated value over the interval we care about.\n int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)\n int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)\n\n # Calculate the average improvement.\n avg_diff = (int2 - int1) / (max_int - min_int)\n return avg_diff", "def get_rates(table_id):\n fields = [\"0\",\"0\",\"0\",\"0\",\"0\",\"0\"]\n for pos, name in enumerate(rates_key_list):\n full_table_id = RATES_TABLE_PREFIX + table_id\n counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name)\n if counter_data is None:\n fields[pos] = STATUS_NA\n elif fields[pos] != STATUS_NA:\n fields[pos] = float(counter_data)\n cntr = RateStats._make(fields)\n return cntr" ]
[ "0.65236944", "0.56431794", "0.54856896", "0.5406255", "0.53362864", "0.52786833", "0.52014273", "0.51985294", "0.51510394", "0.5121336", "0.5098462", "0.50319874", "0.50240964", "0.4994486", "0.49664557", "0.49629846", "0.49505916", "0.49496064", "0.49304843", "0.4920416", "0.49125457", "0.4904395", "0.48961106", "0.4889506", "0.4888221", "0.487757", "0.48674795", "0.48664328", "0.4830822", "0.48303866" ]
0.7960825
0
Returns temporary resampled file name from filepath.
def get_resampled_filename(filepath): try: filepath = filepath.encode('utf-8') except UnicodeDecodeError: pass return 'tmp_%s.flac' % md5(filepath).hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_temporary_file_name(original_file):\n directory, basename = os.path.split(original_file)\n basename = basename[-MAX_TEMPORARY_FILE_BASENAME_LENGTH:]\n\n random_hex = binascii.b2a_hex(os.urandom(16)).decode('utf-8')\n new_file_path = os.path.join(directory, '%s%s' % (random_hex, basename))\n\n return new_file_path", "def _get_temp_file_name():\n tmpfile = tempfile.NamedTemporaryFile(prefix=\"TESS_\")\n h, t = ntpath.split(tmpfile.name)\n return t or ntpath.basename(h)", "def sirsam_resample(data_sirsam):\n return os.path.join(data_sirsam, 'resampling')", "def get_filename(filepath):\n return filepath.replace(\"{}\\\\\".format(RES_DIR), \"\")", "def get_file_inter_name(self):\n\t\tf = tempfile.NamedTemporaryFile(encoding='utf-8',mode='r',delete=False)\n\t\tf.close()\n\t\treturn f.name", "def generate_temp_filename(self):\n prefix = self.generate_filename_prefix()\n now = datetime.now()\n # Ok that might not be the best timestamp system, but it's\n # enough for our needs.\n timestamp = '-'.join([\n ''.join([str(x) for x in now.timetuple()]),\n str(now.microsecond),\n str(randint(10000, 99999))])\n\n filename = prefix + timestamp\n return find_filename(self.tempdir,\n filename)", "def genSampleID(path):\n head, tail = ntpath.split(path)\n result = tail or ntpath.basename(head)\n return genBaseName(result.split(\".\")[0]) # Gets just the sample name, cleans out the \".cleaned.[EXT]\"", "def temp_file_name(suffix):\n return 'tmp%s%s' % (uuid.uuid4(), suffix)", "def sirsam_rs_out(sirsam_resample, sirsam_target_path):\n return os.path.join(sirsam_resample, 'out', \n os.path.splitext(os.path.basename(sirsam_target_path))[0] + '_resampled')", "def sample_file(self) -> str:\n return self._sample_file", "def generate_temp_path(self, file_format=\"\"):\n file_name = os.path.join(\n self.temp_folder,\n f\"temp_{str(time.time()).replace('.', '')}\"\n )\n if file_format:\n file_name += f\".{file_format}\"\n self.logger.debug(f\"Created filename at {file_name}\")\n return file_name", "def get_temp_file_path(self, filename, root=None):\n root = root or self.get_default_temp_dir()\n return root.join(filename)", "def temporary_file_path(self):\n return self.file.name", "def get_file_name() -> str:\n import uuid\n uniq_append_string = uuid.uuid4().hex\n return \"LOCAL_STORAGE_{}\".format(uniq_append_string)", "def get_complete_file_name(file_name: str):\n return LOGGING_DIRECTORY + file_name + '.txt'", "def sirsam_rs_precomp(sirsam_resample, sirsam_target_path):\n return os.path.join(sirsam_resample, 'precomputed',\n os.path.splitext(os.path.basename(sirsam_target_path))[0] + '_resampled')", "def temp_name(self, filename):\n if self.params.get('nopart', False) or filename == '-' or \\\n (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):\n return filename\n return filename + '.part'", "def unique_name(file: Union[str, pathlib.Path]) -> pathlib.Path:\n\n return supplement_file_name(file, uuid.uuid1())", "def get_file_path(filename: str):\n return TEMP_DIR.joinpath(filename)", "def update_destination_file_name (file_name):\n\tglobal COUNTER \n\tCOUNTER += 1\n\tsplitted = file_name.split('/')\n\treturn file_name[:len(file_name)-len(splitted[-1])] + 'Image%05d' % COUNTER +'_'+splitted[-1]", "def getFilePath(self, filename):\n idx = self._soundfiles.index(filename)\n return \"{}/{}\".format(self._soundpaths[idx], filename)", "def get_file_name(path):\n return os.path.basename(path)", "def get_filename(filepath):\n return os.path.basename(filepath)", "def 
_temp_path(self, uri_like):\n handle, filename = tempfile.mkstemp(suffix=uri_like.split(\"/\")[-1])\n os.close(handle)\n return filename", "def _get_filepath(self, name=None, use_timestamp=True):\n current_time = str(int(time.time()))\n if not name and not use_timestamp:\n raise Exception(\"Name or timestamp is required\")\n if name:\n self.fname = \"%s\" % name\n current_time = \"_%s\" % current_time\n if use_timestamp:\n self.fname = \"%s%s\" % (self.fname, current_time)\n if len(self.fname) > 0:\n self.fname = \"%s/%s.jpg\" % (self.picture_directory, self.fname)\n return self.fname", "def getTmpTemplateFile(fname):\n return os.path.join(Configurations.getTmpTemplateDir(), fname)", "def get_filename(file_path):\n\n # Get rid of directories and etc\n just_file = os.path.basename(file_path)\n\n # Now we return just the base name\n return os.path.splitext(just_file)[0]", "def set_temp_file(self):\n\n index = self.filename.rfind('/') + 1\n self.temp_filename = self.filename[:index] + \"tmp_\" + self.filename[index:]", "def filename_from_path(filepath: str) -> str:\n return filepath.split(\"/\")[-1]", "def __return_new_file_name(self, file_name: str, file_path: str):\n\n fastq_runid = re.split('[_.]', file_name) # split on `_` or `.`\n barcode_number = file_path.split(\"/\")[-1] # get the barcode number\n fastq_or_fasta = fastq_runid[-1] # get the .fastq/.fasta file extension\n\n # create the new file name\n new_file_name = \"_\".join(fastq_runid[:3]) # join first three elements\n new_file_name += \"_%s.%s\" % (barcode_number, fastq_or_fasta) # append the barcode number and file extension\n\n return new_file_name" ]
[ "0.67282295", "0.6667649", "0.66115636", "0.6438271", "0.64057815", "0.63899124", "0.63718104", "0.62631476", "0.6262597", "0.62606746", "0.6260422", "0.6186918", "0.61814684", "0.6142914", "0.6125212", "0.60798645", "0.6061999", "0.6057938", "0.60536873", "0.6019673", "0.60102904", "0.601025", "0.60035384", "0.59471357", "0.58892685", "0.58861727", "0.5882232", "0.58717936", "0.58581996", "0.585388" ]
0.76514924
0
Creates a source file at the given target path from one or more input files.
def sox_create_source_file(self, files, target): logging.debug('Source file will be made from:\n%s\n', '\n'.join(files)) logging.info('Making source file: %s', target) options = '' effects = '' files_to_precess = [] if len(files) > 1: options = '--combine concatenate' target_dir = os.path.dirname(target) for f in files: resampled_file = os.path.join(target_dir, self.get_resampled_filename(f)) if os.path.exists(resampled_file): f = resampled_file files_to_precess.append(f) if self._speed is not None: effects = 'speed %s' % self._speed command = 'sox -S --ignore-length %(options)s "%(files)s" "%(target)s" %(effects)s' % { 'options': options, 'files': '" "'.join(files_to_precess), 'target': target, 'effects': effects } self._process_command(command)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assemble_source_files(project, base_dir):\n source_files = project.source_files.all()\n for f in source_files:\n target_dir = os.path.join(base_dir, f.project_dir)\n abs_target = os.path.abspath(os.path.join(target_dir, f.file_name))\n if not abs_target.startswith(target_dir):\n raise Exception(\"Suspicious filename: %s\" % f.file_name)\n abs_target_dir = os.path.dirname(abs_target)\n if not os.path.exists(abs_target_dir):\n os.makedirs(abs_target_dir)\n f.copy_to_path(abs_target)", "def copy_files_individually(source_files, target_dir):\n\tfor source_file in source_files:\n\t\thead_tail = os.path.split(source_file)\n\t\tcopy_file(source_file, os.path.join(target_dir, head_tail[1]))", "def copyto_emitter(target, source, env):\n n_target = []\n\n for t in target:\n n_target = n_target + [t.File( str( s ) ) for s in source]\n\n return (n_target, source)", "def copy_source_files(or_dir,template_dir): \n def copy_sc(file,fpA,fpB):\n fpA = os.path.join(fpA,file)\n if os.path.isfile(fpA):\n shutil.copy(fpA,fpB)\n else:\n raise Exception(\"Error: File '{}' is missing\".format(file))\n return\n \n copy_sc('imta_core.sty',or_dir,template_dir)\n copy_sc('imta_extra.sty',or_dir,template_dir)\n copy_sc('imta_logo.pdf',or_dir,template_dir)\n copy_sc('imta_documentation.tex',or_dir,template_dir)\n print('Template files copied at {}'.format(template_dir))", "def copy_source_files(self):\n\n LOGGER.info(f'start copying source files')\n count = 0\n for sfp in tqdm(sorted(self.source_fps), disable=self.disable_tqdm):\n try:\n meta = extract_law_meta(sfp)\n nodes = parse_xml_fp(sfp)\n tfp = self.stot(sfp)\n tfp.parent.mkdir(parents=True, exist_ok=True)\n save_law_tree(meta['LawTitle'], nodes, tfp)\n except Exception as e:\n LOGGER.error(f'failed to copy {sfp}: {e}')\n continue\n self.target_fps.add(tfp)\n LOGGER.debug(f'copied {sfp} to {tfp}')\n count += 1\n LOGGER.info(f'copied total {count} source files, now total {len(self.target_fps)} target files exist')", "def combine_source_target_files(source_dir, target_dir, out_dir, file_matcher, original_ids=None):\n source_files = get_all_files(source_dir, file_matcher)\n target_files = get_all_files(target_dir, file_matcher)\n target_file_bases = np.array(list(map(lambda x: os.path.basename(x).lower(), target_files)))\n id_var = 'id'\n dedup_vars = [id_var]\n all_txt_vars = ['text', 'user_description', 'user_location']\n RETURN_CHAR_MATCHER = re.compile('[\\n\\r\\t]')\n if(not os.path.exists(out_dir)):\n os.mkdir(out_dir)\n for source_file in source_files:\n # find matching target file\n source_file_base = os.path.basename(source_file).lower()\n target_file_base_idx = np.where(target_file_bases == source_file_base)[0]\n combined_data_file_name = os.path.join(out_dir, source_file_base)\n# if(not os.path.exists(combined_data_file_name)):\n # if target file exists, then combine source/target\n if(len(target_file_base_idx) > 0):\n target_file_base_idx = target_file_base_idx[0]\n target_file = target_files[target_file_base_idx]\n try:\n source_data = pd.read_csv(source_file, sep='\\t', compression='gzip')\n if('Unnamed: 0' in source_data.columns):\n source_data.drop('Unnamed: 0', axis=1, inplace=True)\n # fix column name mismatches\n source_data.rename(columns={'user_screen_name' : 'screen_name', 'user_id' : 'author_id'}, inplace=True)\n target_data = pd.read_csv(target_file, sep='\\t', compression='gzip')\n # combine!\n logging.info(f'combining files for {source_file_base}')\n combined_data = pd.concat([source_data, target_data], axis=0)\n # deduplicate!\n 
combined_data.drop_duplicates(dedup_vars, inplace=True)\n # clean\n combined_data.fillna('', inplace=True)\n # filter original IDs\n if(original_ids is not None):\n combined_data = combined_data[~combined_data.loc[:, id_var].isin(original_ids)]\n # remove return characters\n for txt_var_i in all_txt_vars:\n combined_data = combined_data.assign(**{\n txt_var_i : combined_data.loc[:, txt_var_i].apply(lambda x: RETURN_CHAR_MATCHER.sub('', str(x)))\n })\n logging.info('%d/%d source/target'%(source_data.shape[0], target_data.shape[0]))\n logging.info('combined data has %d/%d data'%(combined_data.shape[0], source_data.shape[0]+target_data.shape[0]))\n # write to file\n combined_data.to_csv(combined_data_file_name, sep='\\t', compression='gzip', index=False)\n except Exception as e:\n logging.info(f'going to skip file {source_file_base} because error {e}')\n # if target file does not exist, copy the source data\n else:\n logging.info(f'copying {source_file} without combining')\n source_data = pd.read_csv(source_file, sep='\\t', compression='gzip')\n if('Unnamed: 0' in source_data.columns):\n source_data.drop('Unnamed: 0', axis=1, inplace=True)\n # fix column name mismatches\n source_data.rename(columns={'user_screen_name' : 'screen_name', 'user_id' : 'author_id'}, inplace=True)\n source_data.to_csv(combined_data_file_name, sep='\\t', compression='gzip', index=False)", "def create_target(cls, relpath, target):\r\n cls.create_file(cls.build_path(relpath), target, mode='a')", "def _targetFile(self):\n basename = os.path.basename(self.src)\n filename = os.path.join(self.target_dir, basename)\n return open(filename, 'w')", "def _CopyFiles(file_pairs):\n\n for pair in file_pairs:\n target_dir = os.path.dirname(pair.target)\n if not os.path.isdir(target_dir):\n os.makedirs(target_dir)\n copyfile(pair.generated, pair.target)", "def copy_files(self, source, target):\n\n if source == target and is_local(self.borrowed_ctx.host):\n logger.warning(\"IGNORE self-node: {}\".format(self.borrowed_ctx.host))\n return\n\n try:\n for item in os.listdir(source):\n if os.path.isfile(os.path.join(source, item)):\n logger.debug(\n \"processing {} --> {}\".format(\n os.path.join(source, item), self.borrowed_ctx.host\n )\n )\n self._sftp_channel.put(\n os.path.join(source, item), \"%s/%s\" % (target, item)\n )\n else:\n self.mkdir(\"%s/%s\" % (target, item), ignore_existing=True)\n self.copy_files(\n os.path.join(source, item), \"%s/%s\" % (target, item)\n )\n except Exception as e:\n logger.warning(\n \"Error of processing target = ({}:{}), for reason: {}\".format(\n self.borrowed_ctx.host, self.borrowed_ctx.port, e,\n )\n )\n exit(0)", "def make_source_text(input_one, input_two, input_three, output_text):\n # clear out the previous file contents\n open(output_text, 'w').close()\n # copy from three input files based on question answers\n copy_text(input_one, output_text)\n copy_text(input_two, output_text)\n copy_text(input_three, output_text)\n return output_text", "def test_input_target_file(self):\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_target_file\"\n params[\"input_target\"] = \"file\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n files = list_files_folder(params[\"input\"], ext=\"fna.gz\")\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), 
\"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")", "def create_source(self, source):\n if not os.path.isdir(source):\n os.makedirs(source)\n # Create a text file in the source directory.\n text_file = os.path.join(source, 'notes.txt')\n with open(text_file, 'w') as handle:\n handle.write(\"This file should be included in the backup.\\n\")\n # Create a subdirectory in the source directory.\n subdirectory = os.path.join(source, 'subdirectory')\n os.mkdir(subdirectory)\n # Create a symbolic link in the subdirectory.\n symlink = os.path.join(subdirectory, 'symbolic-link')\n os.symlink('../include-me.txt', symlink)", "def replicate(self, source):\n names = [\n name for name in os.listdir(source)\n if not name.startswith('.')\n ]\n\n # Filter out directories and copy files\n for name in names:\n src = os.path.abspath(os.path.join(source, name))\n dst = os.path.abspath(os.path.join(self.target, name))\n\n if os.path.isfile(src):\n shutil.copy(src, dst)", "def _generate_copy_target(self, src: 'mesonlib.FileOrString', output: Path) -> None:\n if isinstance(src, File):\n instr = src.absolute_path(self.environment.source_dir, self.environment.build_dir)\n else:\n instr = src\n elem = NinjaBuildElement(self.all_outputs, [str(output)], 'COPY_FILE', [instr])\n elem.add_orderdep(instr)\n self.add_build(elem)", "def stage_input_file(workdir_path, files):\n if not isinstance(files, list):\n files = [files]\n\n for file_dict in files:\n location = urlparse(file_dict['location'])\n if 'basename' in file_dict:\n dest_path = os.path.join(workdir_path, file_dict['basename'])\n else:\n dest_path = os.path.join(workdir_path, os.path.basename(location.path))\n shutil.copy(location.path, dest_path)\n file_dict['path'] = dest_path\n\n for i, secondary_file in enumerate(file_dict.get('secondaryFiles', [])):\n stage_input_file(workdir_path, file_dict['secondaryFiles'][i])", "def create_initial_file():\n\n merge_file = tempfile.NamedTemporaryFile()\n\n # spin the sources for the base file\n for source in sort_sources(\n recursive_glob(settings[\"datapath\"], settings[\"hostfilename\"])\n ):\n\n start = \"# Start {}\\n\\n\".format(os.path.basename(os.path.dirname(source)))\n end = \"\\n# End {}\\n\\n\".format(os.path.basename(os.path.dirname(source)))\n\n with open(source, \"r\", encoding=\"UTF-8\") as curFile:\n write_data(merge_file, start + curFile.read() + end)\n\n # spin the sources for extensions to the base file\n for source in settings[\"extensions\"]:\n for filename in sort_sources(\n recursive_glob(\n path_join_robust(settings[\"extensionspath\"], source),\n settings[\"hostfilename\"],\n )\n ):\n with open(filename, \"r\") as curFile:\n write_data(merge_file, curFile.read())\n\n maybe_copy_example_file(settings[\"blacklistfile\"])\n\n if os.path.isfile(settings[\"blacklistfile\"]):\n with open(settings[\"blacklistfile\"], \"r\") as curFile:\n write_data(merge_file, curFile.read())\n\n return merge_file", "def make(input_filepath, output_filepath) -> None:\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def process_source_file(self, path, files, source_filename):\n os.chdir(path)\n\n min_rate, max_rate, files_to_rates_dict = self.sox_get_sample_rates(files)\n if min_rate != max_rate:\n for f in files:\n if 
files_to_rates_dict[f] != max_rate:\n self.sox_resample(f, max_rate, os.path.dirname(source_filename))\n\n self.sox_create_source_file(files, source_filename)\n os.chdir(os.path.dirname(source_filename))\n self.sox_chop_source_audio(source_filename, self._part_length, self._backshift)\n self.remove_tmp_sources(source_filename)", "def copyFile(source, target):\n\tfrom shutil import copyfile, copystat, copymode\n\tfrom os.path import split\n\tsource = adaptPath(source)\n\ttarget = adaptPath(target)\n\tif int(getFileModifTime(source)) != int(getFileModifTime(target)):\n\t\tmakedir(split(target)[0])\n\t\tcopyfile(source, target)\n\t\tcopystat(source, target)\n\t\tcopymode(source, target)\n\t#~ else:\n\t\t#~ print (\"%s not copied\"%(target))", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def build_from_paths(\n input_source: str,\n input_target: str,\n single_vocab: bool = False,\n num_words_source: int = 50000,\n num_words_target: int = 50000,\n min_count_source: int = 1,\n min_count_target: int = 1,\n) -> (Dict[str, int], Dict[str, int]):\n with ExitStack() as stack:\n logger.info(\"Building vocabulary from dataset: %s and %s\", input_source, input_target)\n files = (stack.enter_context(smart_open(path)) for path in [input_source, input_target])\n return build_vocab(\n *files,\n single_vocab=single_vocab,\n num_words_source=num_words_source,\n num_words_target=num_words_target,\n min_count_source=min_count_source,\n min_count_target=min_count_target\n )", "def copy_separate_files(source,dest1,dest2):\r\n filelist = os.listdir(source)\r\n \r\n if not os.path.exists(dest1):\r\n os.mkdir(dest1)\r\n \r\n if not os.path.exists(dest2):\r\n os.mkdir(dest2)\r\n \r\n for filename in filelist:\r\n source_file = os.path.join(source,filename)\r\n \r\n if filename[-4:] == '.png':\r\n shutil.copy(source_file,dest1)\r\n else:\r\n shutil.copy(source_file,dest2)", "def act_copy_file(self, file_source, file_target):\n try:\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.copy2(file_source, file_target)\n self.logger.debug('%s: Action: <copy> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file copy: %s -> %s', file_source, file_target)", "def generate(src_file_names,\r\n dst_file_name,\r\n dst_doc_file_name,\r\n dst_property_doc_file_name,\r\n name):\r\n methods = []\r\n properties = []\r\n extra_includes = []\r\n entries = (methods, properties)\r\n for src_file_name in src_file_names:\r\n check_file(src_file_name)\r\n m, p = parse_file(src_file_name)\r\n methods.extend(m)\r\n properties.extend(p)\r\n\r\n extra_includes.extend(find_extra_include(src_file_name))\r\n if len(entries[0]) == 0 and len(entries[1]) == 0:\r\n print(\"No entries found in %s.\" % src_file_name)\r\n exit(1)\r\n\r\n write_result(dst_file_name, name, entries, extra_includes, src_file_names)\r\n write_method_doc(dst_doc_file_name, entries[0])\r\n write_property_doc(dst_property_doc_file_name, entries[1])", "def compressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.compress(data, 1)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)", "def copy_files(source,destination):\r\n filelist = os.listdir(source)\r\n \r\n if not os.path.exists(destination):\r\n 
os.mkdir(destination)\r\n \r\n for filename in filelist:\r\n source_file = os.path.join(source,filename)\r\n shutil.copy(source_file,destination)", "def copy_target(target_files, file_name, tmp_loc, dict_2):\n d_1 = OrderedDict()\n \n target_files = natsort.natsorted(target_files)\n for i in target_files:\n if '/' in i:\n if 'slideLayouts' not in i and 'slideMasters' not in i and 'theme' not in i:\n fld_name,fl_name = get_fld_fl(i)\n if os.path.exists(f'{tmp_loc}/ppt/{fld_name}/{fl_name}'):\n path = f'{tmp_loc}/ppt'\n d_1.update(rename(path, fld_name, fl_name, dict_2))\n else:\n d_1.update(rename(tmp_loc, fld_name, fl_name, dict_2))\n # else:\n # if not os.path.isfile(f'{output_path}/ppt/{i}'):\n # if os.path.isfile(f'{tmp_loc}/ppt/{i}'):\n # shutil.copyfile(f'{tmp_loc}/ppt/{i}', f'{output_path}/ppt/{i}')\n # print(\"CALLING... copy_target\")\n return d_1" ]
[ "0.6378731", "0.62601465", "0.6132644", "0.6037653", "0.5912019", "0.5846756", "0.5831473", "0.5821771", "0.57860816", "0.57359564", "0.57261264", "0.5705366", "0.5654199", "0.55903476", "0.5587134", "0.5579851", "0.55784553", "0.55677485", "0.5560209", "0.5544423", "0.5494895", "0.5494895", "0.5494895", "0.54903454", "0.5463956", "0.5455934", "0.54425454", "0.54281586", "0.5397419", "0.5387478" ]
0.7413827
0
Asks SoX for the given file's length in seconds and returns it as a float. Returns 1000 on a dry run.
def sox_get_audio_length(self, audio_file): logging.info('Getting source file length ...') result = self._process_command('soxi -D "%s"' % audio_file, PIPE) if result[1][0] != '': return float(result[1][0].strip('\n')) else: return 1000
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_duration_sox_s(audio_file_path: str) -> float:\n global FS_HZ\n assert FS_HZ is not None\n duration_n = get_duration_sox_n(audio_file_path)\n return duration_n / FS_HZ", "def get_duration_sox_n(audio_file_path: str) -> float:\n global FS_HZ\n assert FS_HZ is not None\n audiometadata = torchaudio.info(audio_file_path)\n num_frames = audiometadata.num_frames\n original_fs_hz = audiometadata.sample_rate\n duration_n = num_frames\n # TODO(theis): probably not exact value\n duration_n_resampled = round(duration_n * (FS_HZ / original_fs_hz))\n return duration_n_resampled", "def duration(file_path):\n command = [\"ffprobe\", \"-show_entries\", \"format=duration\", \"-i\", file_path]\n pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT)\n out, error = pipe.communicate()\n match_object = None if error else DURATION_REGEX.search(out.decode('utf-8'))\n if match_object is None:\n return 0\n length = float(match_object.group(1)) / 60\n return length", "def get_track_length(track_path):\n track_extension = os.path.splitext(track_path)[1]\n if track_extension:\n try:\n mutagen_track = File(track_path)\n track_total_length = mutagen_track.info.length\n except:\n track_total_length = 0\n tkinter.messagebox.showwarning(\n title=\"Warning!\", message=f\"Audio file incorrect : {track_path}\")\n finally:\n track_length_formated = strftime(\n '%M:%S', gmtime(track_total_length))\n track_length_label.configure(text=track_length_formated)\n track_pos_slider.configure(to=track_total_length)\n return track_total_length", "def get_duration(filename):\n cmd = ('ffprobe -v 0 -of flat=s=_ -select_streams v:0 -show_entries '\n 'stream=duration -of default=nokey=1:noprint_wrappers=1 ' +\n filename).split()\n pid = subprocess.run(cmd, universal_newlines=True,\n stdout=subprocess.PIPE)\n if pid.returncode != 0:\n return None\n\n duration_exp = pid.stdout.rstrip()\n try:\n duration = float(duration_exp)\n except:\n duration = 0.\n return duration", "def get_duration(f):\n return 0", "def get_recording_size(file_name):\n recording_size = check_output(\n [\"mp3info\", \"-p\", \"%m:%s\\n\", \"{}\".format(file_name)]).decode(\"utf-8\")\n print(\"Recording size:\", str(recording_size))\n\n minutes_seconds = (int(recording_size.split(\":\")[0]) * 60)\n seconds = int(recording_size.split(\":\")[1].replace(\"\\n\", \"\"))\n recording_seconds_size = minutes_seconds + seconds\n print(\"Recording seconds size:\", str(recording_seconds_size))\n\n return recording_seconds_size", "def to_length_secs(self):\n return (self.bpm / 60.0) / self.period", "def get_song_length_milliseconds(result):\n return int(result['metadata']['music'][0]['duration_ms'])", "def get_duration(file):\n cmd = 'ffprobe -i \"{}\" -show_entries format=duration -v quiet -of csv=\"p=0\"'.format(file)\n try:\n output = subprocess.check_output(\n cmd,\n shell=True, # Let this run in the shell\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n print(e.output)\n output = 0\n # return round(float(output)) # ugly, but rounds your seconds up or down\n return float(output)", "def ms(self):\n # my clock uses seconds internally\n return 1000 * self.read()", "def duration_in_seconds(self):\n \"Should not set track length\"\n return self.duration / float(self.samplerate)", "def get_wav_duration(wav_bytes: bytes) -> float:\n with io.BytesIO(wav_bytes) as wav_buffer:\n wav_file: wave.Wave_read = wave.open(wav_buffer, \"rb\")\n with wav_file:\n frames = wav_file.getnframes()\n rate = wav_file.getframerate()\n return frames / float(rate)", 
"def svn_fs_file_length(*args):\r\n return _fs.svn_fs_file_length(*args)", "def length(self):\n if self.type == 2:\n raise ValueError('impossible to compute length'\n ' for type 2 (asynchronous) file')\n\n return sum(message.time for message in self)", "def __convert_file_size(self, file_size:float)->float:\n return file_size * 1000000", "def get_song_seconds_remaining(result):\n remaining_ms = get_song_length_milliseconds(result) - get_song_elapsed_milliseconds(result)\n return int(remaining_ms / 1000)", "def duration(self):\n with audioread.audio_open(self.path) as f:\n return f.duration", "def adapt_duration(audio_file_path: str) -> float:\n global MAX_SOUND_DURATION_S\n assert MAX_SOUND_DURATION_S is not None\n global FS_HZ\n assert FS_HZ is not None\n global transformer_top\n assert transformer_top is not None\n duration_n = get_duration_sox_n(audio_file_path)\n # trim to max duration\n duration_n = min(MAX_SOUND_DURATION_S * FS_HZ, duration_n)\n # round-up to the resolution of the VQVAE\n vqvae_top_resolution_n = get_vqvae_top_resolution_n()\n duration_n = vqvae_top_resolution_n * (max(\n transformer_top.shape[1],\n round(duration_n / vqvae_top_resolution_n)))\n return duration_n", "def getDuration(fn: str) -> float:\n return QueryWav(fn).duration", "def us(self):\n return 1000 * 1000 * self.read()", "def _duration(self):\n if getattr(self, '_duration_cache', None):\n return self._duration_cache\n duration = extractMetadata(guessParser(\\\n InputIOStream(self))).get('duration')\n if not duration:\n raise Exception(u'Not an audio file')\n else:\n duration = duration.seconds\n self._duration_cache = duration\n return duration", "def fileTime(ft):\n return datetime(1601, 1, 1) + timedelta(microseconds=ft / 10)", "def getStartSpeed(self):\n cmd_string = '?1'\n data = self.sendRcv(cmd_string)\n self.state['start_speed'] = int(data)\n return self.state['start_speed']", "def real_time(self):\n try:\n # TODO: Update for resuming runs\n with open(path.join(self.run_dir, \"TIMINGS\", \"timings.001\"), \"r\") as f:\n text = f.read()\n r = re.match(r\" Total time for loop was(?: *)(.*?)(?: *)seconds\", text, re.DOTALL + re.MULTILINE)\n if not r:\n logger.warning(\"Bad format in timings file. The real time could not be read.\")\n return float(\"nan\")\n else:\n return float(r.group(1))\n except FileNotFoundError:\n return float(\"nan\")", "def extract_time(file):\n\n for line in file:\n\n if \"Execution time\" in line:\n # this is of the form: <li>Execution time: 412.930 s\n return float(line.split(\":\")[1].strip().split(\" \")[0])\n\n elif \"(seconds)\" in line:\n # this is the older form -- split on \"=\"\n # form: <p><b>Execution Time</b> (seconds) = 399.414828\n return float(line.split(\"=\")[1])\n\n raise RuntimeError()", "def seconds(input=None):\n return int(get(input))", "def _convert_sfx_timestamp(ts: int) -> float:\n return float(ts) / 1000", "def get_sp500():\n sp500 = si.get_live_price(\"^GSPC\")\n sp500_trim = \"%.2f\" % sp500\n\n _time = datetime.datetime.now().timetuple()\n _time = time.mktime(tuple(_time))\n _time_label = f\"test\"\n\n return float(sp500_trim), int(_time)", "def getCutoffSpeed(self):\n cmd_string = '?3'\n data = self.sendRcv(cmd_string)\n self.state['cutoff_speed'] = int(data)\n return self.state['cutoff_speed']" ]
[ "0.695703", "0.6086739", "0.5925723", "0.576181", "0.5759218", "0.5731954", "0.5640799", "0.56291753", "0.5601044", "0.5574256", "0.5565806", "0.55071586", "0.5486385", "0.54476947", "0.54070485", "0.53769195", "0.53756505", "0.53739536", "0.5329043", "0.5325697", "0.5305705", "0.5298909", "0.5296886", "0.5290624", "0.52838695", "0.5265299", "0.525693", "0.52439", "0.5237251", "0.5225381" ]
0.61282736
1
Using SoX, chops the source audio file into parts of the given length. Chopping is done in such a way that each successive audio part contains one (1) second from the previous one.
def sox_chop_source_audio(self, source_filename, part_length, backshift=0): logging.info('Preparing for source file chopping ...') wav_length = self.sox_get_audio_length(source_filename) if wav_length <= part_length: parts_count = 1 else: # Calculate audio length with one second back shift. Also known as possum formula %) parts_count = int(round(wav_length / float(part_length - backshift), 0)) parts_count_len = len(str(parts_count)) logging.info('Chopping information:\n' ' Source file length: %(source)s second(s)\n' ' Requested part length: %(part)s second(s)\n' ' Backshift: %(back)s second(s)\n' ' Parts count: %(parts_cnt)s', source=wav_length, part=part_length, parts_cnt=parts_count, back=backshift) logging.info('Starting chopping ...') for index in range(0, parts_count): start_pos = index * part_length if start_pos > 0: # We need to shift all but the first part for `backshift` seconds backward # to not to loose some phrases on chopping. start_pos -= (index * backshift) part_number = str(index + 1).rjust(parts_count_len, '0') # This will strip ID3Tags from file - they are identical thus uninformative. comment = '--comment ""' target = part_number logging.info( 'Working on %s.mp3 [%s/%s - %s%%] ...', target, int(part_number), parts_count, int(int(part_number) * 100 / parts_count) ) command = 'sox -V1 "%(source)s" %(comment)s %(target)s.mp3 trim %(start_pos)s %(length)s' % { 'source': source_filename, 'target': target, 'start_pos': start_pos, 'length': part_length, 'comment': comment } self._process_command(command, PIPE) logging.info('Chopped.\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cut_audio(old_path, new_path, start, end):\r\n fs, data = wavfile.read(old_path)\r\n indx_start = int(start*fs)\r\n indx_end = int(end*fs)+1\r\n wavfile.write(new_path,fs,data[indx_start:indx_end])\r\n\r\n return True", "def read_audiofile(audio_name,cutToLength):\n fs, data = wavfile.read(audio_name)\n # sa.play_buffer(audio_data, num_channels, bydeftes_per_sample,sample_rate)\n #play_obj = sa.play_buffer(data,1,2,fs)\n #play_obj.stop()\n # delete one column. Make mono channel\n if data.shape[1]>1:\n data = numpy.delete(data,1,1)\n #downsample if signal is broad\n if fs>24000:\n data = numpy.delete(data, numpy.s_[::2], 0)\n fs = int(fs/2)\n \n data = data[data!=0]\n data = numpy.delete(data,numpy.s_[ int(cutToLength*fs):len(data)] )\n return data", "def slice_recording(path_recording, path_metadata_filepath_duration):\n\n metadata_filepath_duration = open(path_metadata_filepath_duration, 'r')\n\n start = 0.0\n\n for line in metadata_filepath_duration:\n filepath, duration = line.split(\" | \")\n target_filepath = re.sub('/Mixtures/', '/mic_recordings/Mixtures/', filepath)\n target_parentpath = re.sub('/mixture.wav', '', target_filepath)\n\n # creating folder if the folder doesnot exist\n try:\n os.makedirs(target_parentpath)\n except OSERROR as exception:\n if exception.errno == errno.EEXIST and os.path.isdir(target_parentpath):\n pass\n\n delta_t = float(duration)\n\n # calling ffmpeg to slice the wav file into its respective sizes\n subprocess.call([\"ffmpeg\", \"-i\", path_recording, \"-ss\", str(start), \"-t\", str(delta_t), \"-acodec\", \"copy\", target_filepath])\n\n # resetting the start for next file in line\n start += delta_t\n\n metadata_filepath_duration.close()", "def cut_sample(whole_audio_data, num_samples):\n len_audio_data = len(whole_audio_data)\n if num_samples >= len_audio_data:\n raise Exception(\"Length of to be generated signal cannot be greater and equal to original audio signal\")\n sys.exit(-1)\n\n # generate a random number which is used as a first index to cut off\n ind = random.randint(0, len_audio_data-num_samples)\n gen_data = whole_audio_data[ind:ind+num_samples]\n return gen_data", "def truncate(data, sequence_length=3000):\n res = []\n for sample in data:\n if len(sample) > sequence_length:\n sample = sample[:sequence_length]\n res.append(sample)\n else:\n str_added = [PAD_STR] * (sequence_length - len(sample))\n sample += str_added\n res.append(sample)\n return res", "def get_chop_times_indices(times, chop_length=180., chop_nsamp=None, strict=False, exit_on_error=False):\n\n n_times = len(times)\n\n try:\n data_type = times.dtype()\n except:\n data_type = np.float64\n\n if chop_nsamp: # compute chop based on number of samples\n n_chops = int(n_times // chop_nsamp)\n if n_chops == 0:\n n_chops = 1\n n_times_chop = chop_nsamp\n else: # compute chop based on duration given\n dt = times[1] - times[0] # time period between two time samples\n n_chops, t_rest = np.divmod(times[-1], chop_length)\n n_chops = int(n_chops)\n\n # chop duration in s\n if strict:\n # ToDo check for times[-1] < chop_length\n chop_len = chop_length\n else:\n chop_len = chop_length + t_rest // n_chops # add rest to chop_length\n\n msg1 = [\n \" -> number of chops : {}\".format(n_chops),\n \" -> calculated chop legth: {}\".format(chop_len),\n \" -> rest [s] : {}\".format(t_rest),\n \"-\" * 40,\n \" -> chop length : {}\".format(chop_length),\n \" -> numer of timepoints : {}\".format(n_times),\n \" -> strict : {}\".format(strict),\n \"-\" * 40,\n \" -> exit on error : 
{}\\n\".format(exit_on_error)\n ]\n\n try:\n n_times_chop = int(chop_len / dt)\n except:\n if exit_on_error:\n msg = [\"EXIT on ERROR\"]\n msg.extend(msg1)\n logger.exception(\"\\n\".join(msg))\n assert (chop_len > 0), \"Exit => chop_len: {}\\n\".format(chop_len)\n else: # data size < chop_length\n msg = [\"setting <chop_len> to number of time points!!!\"]\n msg.extend(msg1)\n logger.error(\"\\n\".join(msg))\n\n n_times_chop = n_times\n n_chops = 1\n msg = [\"data length smaller then chop length !!!\",\n \" --> Adjusting:\",\n \" -> number of chops: {}\".format(n_chops),\n \" -> chop time : {}\".format(n_times_chop)\n ]\n logger.warning(\"\\n\".join(msg))\n\n # check if chop length is larger than max time (e.g. if strict=True)\n if n_times_chop > n_times:\n n_times_chop = n_times\n\n # compute indices for each chop\n ix_start = np.arange(n_chops) * n_times_chop # first indices of each chop\n ix_end = np.append((ix_start - 1)[1:], n_times - 1) # add last entry with last index\n\n # chop indices\n chop_indices = np.zeros([n_chops, 2], dtype=np.int)\n chop_indices[:, 0] = ix_start\n chop_indices[:, 1] = ix_end\n\n # times in s\n chop_times = np.zeros([n_chops, 2], dtype=data_type)\n chop_times[:, 0] = times[ix_start]\n chop_times[:, 1] = times[ix_end]\n\n return chop_times, chop_indices", "def chunked(size, source):\n for i in range(0, len(source), size):\n yield source[i : i + size]", "def getlastslice(inputtaken):\n output = inputtaken.split('.')[0] + 'last.wav'\n with contextlib.closing(wave.open(inputtaken, 'r')) as framesget:\n frames = framesget.getnframes()\n rate = framesget.getframerate()\n duration = frames / float(rate)#duration of the input file\n duration1 = duration-0.1\n win = wave.open(inputtaken, 'rb')\n wout = wave.open(output, 'wb')\n time0, time1 = duration, duration1 # get audio for the last 100 miliseconds\n start0 = int(time1 * win.getframerate())\n start1 = int(time0 * win.getframerate())\n win.readframes(start0)\n frames = win.readframes(start1-start0)\n wout.setparams(win.getparams())\n wout.writeframes(frames)\n win.close()\n wout.close()\n return output", "def slice_signal(file, window_size, stride, sample_rate):\n wav, sr = librosa.load(file, sr=sample_rate)\n hop = int(window_size * stride)\n slices = []\n for end_idx in range(window_size, len(wav), hop):\n start_idx = end_idx - window_size\n slice_sig = wav[start_idx:end_idx]\n #print(type(slice_sig),' ',slice_sig.shape,'begin:',start_idx,'end_idx:',end_idx)\n slices.append(slice_sig)\n\n if(len(slices)*window_size<len(wav)):\n slice_sig = np.zeros((window_size,))\n temp = wav[len(slices)*window_size:]\n slice_sig[:len(temp)] = temp\n slices.append(slice_sig)\n #print(type(slice_sig), ' ', slice_sig.shape,'begin:',0,'end_idx:',len(temp))\n\n return slices", "def trim_audio(data, rate=44100, start_trim=0, end_trim=0, log=False):\n chop = np.copy(data[start_trim*rate : len(data)-end_trim*rate])\n if log:\n m, s = divmod(float(len(data))/rate, 60)\n h, m = divmod(m, 60)\n logging.info(\"Original recording length: %d h %d m %d s\" % (h, m, s))\n logging.info(\"Removed [%d s, %d s] from [start, end] of recording.\" %\n (start_trim, end_trim))\n return chop", "def make_chunks(self, audio_segment, chunk_length):\r\n\t\tnumber_of_chunks = math.ceil(len(audio_segment) / float(chunk_length))\r\n\t\treturn [audio_segment[i * chunk_length:(i + 1) * chunk_length]\r\n\t\t\t\tfor i in range(int(number_of_chunks))]", "def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield 
audio_file[j:j + chunksize]", "def crop_to_segments(audio, rate, segments):\n crop_start = min(segment.start_frame for segment in segments)\n crop_end = max(segment.end_frame for segment in segments)\n\n for segment in segments:\n segment.start_frame -= crop_start\n segment.end_frame -= crop_start\n\n return audio[crop_start:crop_end], rate, segments", "def split_on_silence_threshold(wav_file, dest_dir):\n # Read the file\n audioSegment = AudioSegment.from_wav(wav_file)\n # Calculating the silence threshold\n # Normalizing the audio file belfore finding the threshold\n full_audio_wav = normalize(audioSegment)\n loudness_ms_list = [] # Save the audio levels of all the chunks\n for ms_chunk in full_audio_wav:\n loudness_ms_list.append(round(ms_chunk.dBFS))\n print(\"Audio levels are recorded\", file=sys.stderr)\n # Using pandas df for easier manipulation\n df = pd.DataFrame(loudness_ms_list)\n df[0] = df[df[0] != float(\"-inf\")] # Remove the very low levels\n st = df[0].mean()\n st = st if st < -16 else -16 # Because -16db is default\n # Splits the audio if silence duration is MSL long\n MSL = 500 # minimum silence length in ms\n chunks = split_on_silence(\n full_audio_wav, \n # split on silences longer than 500ms (500ms)\n min_silence_len=MSL, \n # anything under -16 dBFS is considered silence\n silence_thresh=st, \n # keep 200 ms of leading/trailing silence\n keep_silence=200, \n )\n # Saving all the chunks\n print(\"Writing all the files, this may take some time!\", file=sys.stderr)\n for index, chunk in enumerate(chunks):\n chunk_file_name = os.path.join(dest_dir, \"sample_{}.wav\".format(str(index).zfill(10)))\n print(\"Saving the file to \" + chunk_file_name, file=sys.stderr)\n # You can export as mp3 etc, note that it has dependency on ffmpeg\n chunk.export(chunk_file_name, format=\"wav\")", "def cut_wavs(src, tgt, start, end):\n existed = os.path.exists(tgt)\n cmd = (\"sox\", \"--ignore-length\", src, \"-c 1 -r 16000 -b 16\", tgt, \"trim\", str(start), str(end - start))\n print u\" \".join(cmd)\n os.system(u\" \".join(cmd))\n return existed", "def chunked(self, length, overlap):\n def new_gen():\n buffer = self.read(length)\n while True:\n yield np.array([buffer]) #pack into one more dimension\n new_elems = self.read(length - overlap)\n if new_elems.shape[0] == 0:\n # Reached the end of the stream\n break\n buffer[:overlap] = buffer[length-overlap:]\n buffer[overlap:] = new_elems\n return Stream(new_gen(), chunk_size=1)", "def split_audio_into_chunks(sampling_rate, amplitude_vector, chunk_size):\n \n col_size = int(chunk_size / ((1 / sampling_rate) * 1000))\n whole = int(len(amplitude_vector) / col_size)\n first_partition_index = whole*col_size\n first_partition = amplitude_vector[:first_partition_index]\n second_partition = amplitude_vector[first_partition_index:]\n return first_partition.reshape((whole, col_size)), second_partition", "def split( self, rSilenceTresholdPercent = 0.1, rSilenceMinDuration = 0.3, nExtractJustFirsts = -1 ):\n nLimit = int( self.getSampleMaxValue() * rSilenceTresholdPercent / 100 ) \n print( \"INF: sound.Wav.split: splitting a sound of %5.3fs, using silence limits at %d for %5.3fs\" % (self.rDuration, nLimit, rSilenceMinDuration) ) \n aSplitted = []\n \n precalcWavIsNotSilence = np.abs(self.data)>nLimit\n\n #~ print self\n \n nCurrentPos = 0 # in data index (not sample)\n nSilenceMinLenData = rSilenceMinDuration * self.nAvgBytesPerSec * 8 / self.nNbrBitsPerSample\n while( nCurrentPos < len(self.data) ):\n \n # first find the beginning of a sound \n 
nFirstNonSilenceIndex = findFirstTrueValue( precalcWavIsNotSilence[nCurrentPos:] )\n #~ print( \"nFirstNonSilenceIndex (brut): %d\" % nFirstNonSilenceIndex )\n if( nFirstNonSilenceIndex == -1 ):\n # all remaining sound are silence!\n break\n nFirstNonSilenceIndex += nCurrentPos\n nNumFirstSample = nFirstNonSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found a sound at sample %d\" % nNumFirstSample )\n nCurrentPos = nFirstNonSilenceIndex # so at the end, we're stopping\n \n # then find end\n nEndOfSilence = nNumFirstSample*self.nNbrChannel # init of the loop\n while( nEndOfSilence < len(self.data) ):\n #nFirstSilenceIndex = np.argmax( np.abs(self.data[nEndOfSilence:])<=nLimit )\n nFirstSilenceIndex = findFirstFalseValue( precalcWavIsNotSilence[nEndOfSilence:] ) \n #~ print( \"nFirstSilenceIndex (brut): %d (from %d)\" % (nFirstSilenceIndex, nEndOfSilence) )\n if( nFirstSilenceIndex == -1 ):\n break\n nFirstSilenceIndex += nEndOfSilence\n # ensure there's enough silence\n nEndOfSilence = findFirstTrueValue( precalcWavIsNotSilence[nFirstSilenceIndex:] )\n #~ print( \"nEndOfSilence (brut): %d (data: %d) (offset: %d)\" % (nEndOfSilence, self.data[nFirstSilenceIndex+nEndOfSilence],nEndOfSilence + nFirstSilenceIndex) )\n # positionnate onto the end of the silence for next time\n if( nEndOfSilence == -1 ):\n nCurrentPos = len(self.data)\n else:\n nCurrentPos = nEndOfSilence + nFirstSilenceIndex\n \n if( nEndOfSilence > nSilenceMinLenData or nEndOfSilence == -1 ):\n break\n nEndOfSilence += nFirstSilenceIndex\n # while - end\n \n # each time we're out, we've got a silence or we're at the end => new split\n if( nFirstSilenceIndex == -1 ):\n break\n nNumLastSample = nFirstSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found the end of that sound at sample %d\" % nNumLastSample )\n if( nNumLastSample - nNumFirstSample > 4000 ):\n w = Wav()\n w.copyHeader( self )\n w.data = np.copy(self.data[nNumFirstSample*self.nNbrChannel:nNumLastSample*self.nNbrChannel])\n nPeakMax = max( max( w.data ), -min( w.data ) )\n if( nPeakMax > self.getSampleMaxValue() / 8 ): # remove glitch sound\n w.updateHeaderSizeFromDataLength()\n print( \"INF: sound.Wav.split: new split of %5.2fs\" % w.rDuration )\n aSplitted.append( w )\n #~ print( \"nCurLocalVs: %s\" % nCurLocalVs )\n if( nExtractJustFirsts != -1 and nExtractJustFirsts == len(aSplitted) ):\n print( \"WRN: sound.Wav.split: got enough split (%d), leaving...\" % len(aSplitted) )\n break\n # while - end\n print( \"INF: sound.Wav.split: created %d wav(s)\" % len( aSplitted ) )\n return aSplitted", "def get_large_audio_transcription(path):\n # open the audio file using pydub\n r = sr.Recognizer()\n sound = AudioSegment.from_mp3(path)\n sound.export(\"tmp.wav\", format=\"wav\")\n sound = AudioSegment.from_wav('tmp.wav')\n # split audio sound where silence is 700 miliseconds or more and get chunks\n chunks = split_on_silence(sound,\n # experiment with this value for your target audio file\n min_silence_len = 500,\n # adjust this per requirement\n silence_thresh = sound.dBFS-14,\n # keep the silence for 1 second, adjustable as well\n keep_silence=500,\n )\n folder_name = \"audio-chunks\"\n # create a directory to store the audio chunks\n if not os.path.isdir(folder_name):\n os.mkdir(folder_name)\n whole_text = \"\"\n\n chapter=(str(path.split('/')[-1])).split('_')[3]\n # if chapter == '01':\n # target=2\n # else:\n # target=1\n target=2\n # process each chunk\n for i, audio_chunk in enumerate(chunks, start=1):\n # export audio chunk and save 
it in\n # the `folder_name` directory.\n if i==1:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened,language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n #print(chunk_filename, \":\", text)\n whole_text += text\n # return the text for all chunks detected\n else:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened, language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n # print(chunk_filename, \":\", text)\n if chapter == '01':\n whole_text += ' ' +text\n if str(text).isalnum():\n if str(text).split(' ')[0]==' ':\n whole_text += text\n else: whole_text += ' '+text\n # return the text for all chunks detected\n\n if i==target:\n break\n if os.path.isfile('tmp.wav') :os.remove('tmp.wav')\n subprocess.run([\"rm\", \"-rf\", folder_name])\n return whole_text", "def trim_num_parts(file: str,\r\n num_parts: int,\r\n equal_distribution: bool = False,\r\n clip_length: Union[float, int, str] = 30,\r\n random_start: bool = True,\r\n random_sequence: bool = True) -> Optional[List]:\r\n num_parts = int(num_parts)\r\n clip_length = int(clip_length)\r\n split_part = duration(file) / num_parts\r\n start = 0\r\n # Start splitting the videos into 'num_parts' equal parts.\r\n video_list = []\r\n for idx in range(1, num_parts + 1):\r\n start, end = start, start + split_part\r\n trim_video(file, filename(file, idx), start, end)\r\n start += split_part\r\n video_list.append(filename(file, idx))\r\n if equal_distribution:\r\n for file in video_list:\r\n if clip_length <= split_part:\r\n start, end = 0, clip_length\r\n if random_start:\r\n start = random.randint(1, int(duration(file)))\r\n end = start + clip_length\r\n file, temp = quick_rename(file)\r\n trim_video(temp, file, start, end)\r\n time.sleep(2.0)\r\n if random_sequence:\r\n return random.shuffle(video_list)\r\n else:\r\n return video_list", "def chunkify(song):\n assert len(song) >= CHUNK_SIZE * 2\n for i in xrange(0, len(song) - CHUNK_SIZE, CHUNK_SIZE // 2):\n yield np.fft.rfft(song[i: i + CHUNK_SIZE])", "def trim(input_file: str, output_path: str, trim_interval: int=5,\n num_workers: int=None, verbose_level=0):\n prefix = input_file.split(os.sep)[-1][:-4] # Get name and remove extension\n duration = float(syscommand.system('soxi -D ' + input_file))\n if duration == 0:\n # For some reason, the soxi command failed with some large files\n # tested. 
This is an attempt to get the duration in that case.\n import wave\n import contextlib\n with contextlib.closing(wave.open(input_file, 'r')) as f:\n frames = f.getnframes()\n rate = f.getframerate()\n duration = frames / float(rate)\n trims = list(np.arange(0, duration, trim_interval))[:-1]\n if str(verbose_level) == '2' and workers == 1:\n # This code is duplicated for debugging purposes\n for t in trims:\n trim_audio(audio_path=input_file, output_path=output_path,\n name=prefix + '_' + str(t) + '_' + str(duration),\n position=t, duration=trim_interval,\n verbose_level=verbose_level)\n else:\n # Make parallel calls to trim the audio file\n with concurrent.futures.ProcessPoolExecutor(max_workers=num_workers) \\\n as executor:\n futures = [\n executor.submit(fn=trim_audio,\n audio_path=input_file,\n output_path=output_path,\n name=prefix + '_' + str(t) + '_' +\n str(duration),\n position=t,\n duration=trim_interval,\n verbose_level=verbose_level)\n for t in trims]\n\n kwargs = {\n 'total': len(futures),\n 'unit': 'files',\n 'unit_scale': True,\n 'leave': True\n }\n for f in tqdm(concurrent.futures.as_completed(futures), **kwargs):\n pass", "def speedx(self, sound_array, factor): # http://zulko.github.io/blog/2014/03/29/soundstretching-and-pitch-shifting-in-python/\n indices = np.round(np.arange(0, len(sound_array), factor))\n indices = indices[indices < len(sound_array)].astype(int)\n return sound_array[indices.astype(int)]", "def segment(sound_file, spec_file, ms_step, pix_per_s, sound_output_dir, spec_output_dir):\n pix_per_ms = pix_per_s/1000\n sound = AudioSegment.from_wav(sound_file)\n start, stop = 0, ms_step\n start_pixel, stop_pixel = start*pix_per_ms, stop*pix_per_ms\n spec = Image.open(spec_file)\n chopping = True\n while stop <= len(sound):\n \n # Split sound\n chunk = sound[start:stop]\n chunk.export(sound_output_dir + sound_file.split(\"/\")[-1].split(\".\")[0] + \"_\" + str(start) + \"-\" + str(stop) + \".wav\", format=\"wav\")\n\n # Split spectrogram\n w, h = spec.size\n cropped_spec = spec.crop((start_pixel, 0, stop_pixel, h))\n cropped_spec.save(spec_output_dir + sound_file.split(\"/\")[-1].split(\".\")[0] + \"_\" + str(start) + \"-\" + str(stop) + \".png\")\n\n start += ms_step\n stop += ms_step\n start_pixel, stop_pixel = start*pix_per_ms, stop*pix_per_ms", "def trim_silence(T, hz, signal):\n N = T * hz\n extra = len(signal) - N\n c = np.abs(signal).cumsum()\n c = c[-extra:] - c[:extra]\n i = np.argmax(c)\n print(f'Keeping {T:.2g} of {len(signal)/hz:.2g} seconds'\n f' starting at +{i/hz:.2f} seconds')\n return signal[i:i+N]", "def cut_and_eq(song_name):\r\n print(\"[{}] STATUS: Loading...\".format(song_name))\r\n sound_file = AudioSegment.from_mp3(song_name)\r\n print(\"[{}] STATUS: Loaded, now processing...\".format(song_name))\r\n sound_file = match_target_amplitude(sound_file, TARGET_VOLUME) # Amplify beforehand to prevent over-zealous cutting\r\n chunks = split_on_silence(sound_file, SILENCE_CUTOFF, THRESHOLD, keep_silence=ACCEPTABLE_SILENCE)\r\n\r\n if len(chunks) > 1:\r\n print(\"[{}] ERROR: Too many chunks ({}) cannot export\".format(song_name, len(chunks)))\r\n return song_name\r\n else:\r\n output = AudioSegment.empty()\r\n for chunk in chunks:\r\n output += chunk\r\n\r\n new_name = song_name.split(\".\")[0]\r\n print(\"[{}] STATUS: Processed, now exporting...\".format(song_name))\r\n metadata = mediainfo(song_name).get('TAG',{})\r\n output.export(OUTPUT_NAME_FORMAT.format(new_name), format=OUTPUT_FORMAT, tags=metadata)\r\n print(\"[{}] STATUS: Exported to {} - 
cleaned.{}\".format(song_name, new_name, OUTPUT_FORMAT))\r\n return None", "def apply_subspace(\n noisy_signal,\n frame_len=256,\n mu=10,\n lookback=10,\n skip=2,\n thresh=0.01,\n data_type=np.float32,\n):\n\n scnr = Subspace(frame_len, mu, lookback, skip, thresh, data_type)\n processed_audio = np.zeros(noisy_signal.shape)\n n = 0\n hop = frame_len // 2\n while noisy_signal.shape[0] - n >= hop:\n processed_audio[n : n + hop,] = scnr.apply(noisy_signal[n : n + hop])\n\n # update step\n n += hop\n\n return processed_audio", "def splice_audio(file_path, start, end):\n audio = AudioSegment.from_mp3(file_path)\n\n # Pull thumbnail\n tags = ID3(file_path)\n thumbnail = tags.get(\"APIC:\").data\n\n # Pull any other tags from og audio file\n tags = mediainfo(file_path).get('TAG', {})\n\n # Get start and and end paramters\n # to pull the audio splice of interest\n start = timestamp_to_milliseconds(start)\n end = timestamp_to_milliseconds(end)\n\n spliced = audio[start:end]\n spliced.export(\n file_path,\n format=\"mp3\",\n tags=tags\n )\n\n audiofile = eyed3.load(file_path)\n audiofile.tag.images.set(3, thumbnail, 'image/jpeg')\n audiofile.tag.save()", "def apply_fourier_transform(chunked_audio):\n pass", "def unchunkify(chunks):\n recreated_chunks = list(map(lambda x: np.fft.irfft(combine_phase_and_power(*x)), chunks))\n total_length = len(recreated_chunks) * CHUNK_SIZE // 2\n output = np.zeros(total_length)\n window = np.power(np.sin(np.linspace(0, np.pi, CHUNK_SIZE)), 2)\n \n for i, j in enumerate(xrange(0, total_length - CHUNK_SIZE, CHUNK_SIZE // 2)):\n o = window * recreated_chunks[i]\n \n output[j: j+CHUNK_SIZE] += o\n return output" ]
[ "0.61208063", "0.60243565", "0.5976565", "0.584501", "0.58333117", "0.57951015", "0.5730551", "0.56922776", "0.5681019", "0.5678523", "0.5641848", "0.5629693", "0.5624784", "0.55397713", "0.5506439", "0.54790694", "0.5460991", "0.54587924", "0.54231524", "0.54199386", "0.54034495", "0.540116", "0.5393647", "0.53484696", "0.5303379", "0.5302274", "0.52917534", "0.5291159", "0.5288627", "0.5272514" ]
0.7706262
0
Removes temporarily created source files.
def remove_tmp_sources(source_filename):
    logging.info('Removing temporary files ...')
    source_dir = os.path.dirname(source_filename)
    if os.path.exists(source_filename):
        os.remove(source_filename)
    for f in os.listdir(source_dir):
        if f.startswith('tmp_'):
            os.remove(os.path.join(source_dir, f))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(self):\n os.remove(\"temp.py\") # Delete the file \"temp.py\", to free up disk space", "def remove_temporary_files():\n try:\n xml_file_path, bin_file_path = get_ida_exported_files()\n if os.path.isfile(xml_file_path):\n os.remove(xml_file_path)\n\n if os.path.isfile(bin_file_path):\n os.remove(bin_file_path)\n\n except Exception:\n print(\"GhIDA:: [!] Unexpected error while removing temporary files.\")", "def clean_up_temp_dir():\n files = glob.glob(f'{CONFIG_DIR}/tmp/*')\n for f in files:\n try:\n os.remove(f)\n except Exception:\n pass", "def clean_up_temp_files():\n global __tmp_model_dir\n\n if __tmp_model_dir is not None:\n FileUtils.deleteDirectory(__tmp_model_dir)\n __tmp_model_dir = None", "def _clean_up_optimization():\n for (root, dirs, files) in walk(TEMP_MODULES_DIR_PATH, topdown=False):\n for file in files:\n if file.startswith(\"__temp_\"):\n remove(f\"{root}/{file}\")\n try:\n rmdir(root)\n except OSError:\n G.warn_(f\"Unidentified file found in temporary directory: {root}\")", "def cleanUpTemporaryFiles(options):\n os.system(\"rm \"+options.output_directory_per_run+\"/*.abundance\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*.phasing_score\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*regionsOfInterest*\")\n os.system(\"mv \"+options.output_directory_per_run+\"/* \"+options.output_directory_per_run+\"/../\")\n os.system(\"rm -rf \"+options.output_directory_per_run)", "def clear_tempfiles(self, remove=True):\n while self._tempfiles:\n self.pop(remove)\n self.push()", "def scrub():\n\n\tlocal(\"rm -fr dist build\")\n\tlocal(\"find . -name \\\"*.pyc\\\" -exec rm '{}' ';'\")", "def tearDown(self):\r\n remove_files(self.files_to_remove, False)\r\n if self.tmpdir:\r\n rmtree(self.tmpdir)\r\n\r\n # clean up the file from init_flowgram_file\r\n if (hasattr(self, \"tmp_filename\") and exists(self.tmp_filename)):\r\n remove(self.tmp_filename)", "def del_tmp() -> None:\n for elem in os.listdir('./tmp'):\n path = f\"./tmp/{elem}\"\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)", "def _delete_temp():\n global _TEMP_NAME\n\n try:\n database.delete_temp(_TEMP_NAME)\n outputtools.delete_temp(_TEMP_NAME)\n except:\n raise", "def remover_files():\n directory = os.getcwd()\n for file_name in glob.glob((\"{}/tmp/*\").format(directory)):\n remove(file_name)", "def clearTemp():\n Installer.tempDir.rmtree(safety='Temp')", "def clear_tmp_folder(self):\r\n for file in os.listdir(self.temp_dir):\r\n if file.endswith('.png') or file.endswith('.jpg'):\r\n path = os.path.join(self.temp_dir, file)\r\n print ('Cleaned up {}'.format(path))\r\n os.remove(path)", "def _remove_tmpfiles():\n for f in tmpfiles:\n try:\n os.remove(f)\n except OSError:\n pass", "def cleanup(self):\r\n if self.tempDirectory != None:\r\n shutil.rmtree(self.tempDirectory, True)\r\n self.tempDirectory = None", "def clean():\n local('rm -fr %s' % os.path.abspath(env.config['destination']))", "def clear_temp(remove_all=True):\n tf_list = []\n\n if remove_all:\n temp_dir = _get_temp_dir(False)\n temp_dir += (\n os.path.sep if os.path.sep not in temp_dir[len(temp_dir) - 1] else \"\"\n )\n tf_list = glob.glob(\"{0}TESS_*\".format(temp_dir))\n else:\n global _tempfiles\n\n tf_list = list(_tempfiles)\n _tempfiles.clear()\n\n for tf in tf_list:\n if os.path.isfile(tf):\n _remove_file(tf)", "def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n 
files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()", "def clean(ctx):\n ctx.run(\n \"find . -type f -name '*.pyc' -delete && \"\n \"find . -type f -name '*.pyo' -delete && \"\n \"rm -rf .pytest_cache && \"\n \"rm -rf .mypy_cache\"\n )", "def _clean_up_temporary_files(dataset_dir):\n return", "def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" %(self._para_dir))\n os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))", "def reset():\n local('cd {{ project_name }} && \\\n rm -rf static && rm -rf gzip && rm -rf build')", "def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)", "def clean(session):\n clean_dirs = (\n get_path(\".cache\"),\n get_path(\".coverage\"),\n get_path(\".pytest_cache\"),\n get_path(\"__pycache__\"),\n get_path(\"build\"),\n get_path(\"dist\"),\n get_path(\"docs\", \"__pycache__\"),\n get_path(\"docs\", \"build\"),\n get_path(\"scripts\", \"macos\", \"__pycache__\"),\n get_path(\"src\", \"python\", \"bezier.egg-info\"),\n get_path(\"src\", \"python\", \"bezier\", \"__pycache__\"),\n get_path(\"tests\", \"__pycache__\"),\n get_path(\"tests\", \"functional\", \"__pycache__\"),\n get_path(\"tests\", \"unit\", \"__pycache__\"),\n get_path(\"tests\", \"unit\", \"hazmat\", \"__pycache__\"),\n get_path(\"wheelhouse\"),\n )\n clean_globs = (\n get_path(\".coverage\"),\n get_path(\"*.mod\"),\n get_path(\"*.pyc\"),\n get_path(\"docs\", \"abi\", \"example\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.pyc\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.pyd\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.so\"),\n get_path(\"src\", \"fortran\", \"*.o\"),\n get_path(\"tests\", \"*.pyc\"),\n get_path(\"tests\", \"functional\", \"*.pyc\"),\n get_path(\"tests\", \"unit\", \"*.pyc\"),\n )\n for dir_path in clean_dirs:\n session.run(shutil.rmtree, dir_path, ignore_errors=True)\n for glob_path in clean_globs:\n for filename in glob.glob(glob_path):\n session.run(os.remove, filename)", "def clean_local():\n local('rm -fr build')\n local('mkdir -p build')", "def tearDown(self):\n for f in os.listdir('/tmp'):\n if not f.startswith(self.FILE_PREFIX):\n continue\n\n os.remove(os.path.join('/tmp', f))", "def clean_python(context):\n context.run(\"find . -name '*.pyc' -exec rm -f {} +\")\n context.run(\"find . -name '*.pyo' -exec rm -f {} +\")\n context.run(\"find . -name '*~' -exec rm -f {} +\")\n context.run(\"find . -name '__pycache__' -exec rm -fr {} +\")", "def remove_local():\n\n try:\n # if str(Settings.SKIP_DELETE) == \"True\":\n # Settings.maybe_print(\"skipping local remove\")\n # return\n # Settings.print('Deleting Local File(s)')\n # delete /tmp\n tmp = File.get_tmp()\n if os.path.exists(tmp):\n shutil.rmtree(tmp)\n Settings.print('Local File(s) Removed')\n else:\n Settings.print('Local Files Not Found')\n except Exception as e:\n Settings.dev_print(e)", "def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()" ]
[ "0.81248486", "0.76791877", "0.75705147", "0.7484428", "0.7383845", "0.7069873", "0.7018626", "0.7002822", "0.69841164", "0.695557", "0.69193155", "0.6911832", "0.6905299", "0.6900335", "0.68992656", "0.6885628", "0.68790567", "0.6870767", "0.6839062", "0.6825669", "0.6798219", "0.67966276", "0.677906", "0.6753032", "0.67436683", "0.6735631", "0.67354923", "0.6732608", "0.6723357", "0.6718543" ]
0.796507
1
Load configurations from the config.ini file located in the same directory
def load_config():
    config = ConfigParser()
    config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
    return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_config():\n config = configparser.ConfigParser()\n config.read('config.ini')\n return config", "def load_config():\n config_file = os.path.join(\n Path(os.path.dirname(os.path.realpath(__file__))).parent,\n \"config.ini\"\n )\n if not os.path.exists(config_file):\n raise FileNotFoundError(config_file)\n app_config = configparser.ConfigParser()\n app_config.read(config_file)\n return app_config['uberoo']", "def load_configurations() :\n\n local_path = os.path.dirname(os.path.abspath(__file__))\n print(local_path)\n file_path = local_path + os.sep + 'conf.ini'\n parser = configparser.ConfigParser()\n\n if os.path.exists(file_path) :\n config = parser.read(file_path)\n else :\n parser['PATH'] = {}\n parser['PATH']['PATH_TO_DB'] = os.path.expanduser('~/inlusio_data/InlusioDB_Juni_2015.sqlite')\n parser['PATH']['PHYSIO_PATH'] = os.path.expanduser('~/inlusio_data')\n print('Creating new configuration file!!!')\n print('Please fit conf.ini to your local data path!')\n with open(file_path, 'w') as configfile:\n parser.write(configfile)\n\n return parser", "def load_conf(self):\n\n self.load_file(self.ini_file)\n self.files = []\n conf_file = open(self.ini_file, \"r\")\n for l in conf_file:\n self.files.append(l.strip())\n conf_file.close()", "def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(os.path.join(get_current_directory(), 'citi.config'))\n\treturn cfg", "def parse_config():\n config_path = Path(\"config.ini\")\n if config_path.exists():\n config.read(config_path)\n else:\n config[\"database\"] = {\"location\": \"image-database.db\"}\n config[\"images\"] = {\"extensions\": \".jpeg,.jpg,.png,.gif,.tiff\"}\n with open(config_path, \"w\") as configfile:\n config.write(configfile)\n config.read(config_path)", "def parse_config():\n config_file = glob.glob('config.ini')\n parser = ConfigParser()\n if config_file:\n parser.read(config_file)\n else:\n cwd = os.path.abspath(os.path.dirname(__file__))\n config_file = os.path.join(cwd, 'default_config.ini')\n parser.read(config_file)\n return _parse_config(parser)", "def load_config(filename):\n filepaths = []\n for dirpath in os.path.expanduser('~'), os.curdir, '':\n try:\n filepath = os.path.join(dirpath, filename)\n filepaths.append(filepath)\n with open(filepath, 'r') as f:\n return Config(yaml.safe_load(f))\n except IOError:\n pass\n raise IOError('Configuration file not found: ' + ', '.join(filepaths))", "def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(join(get_current_path(), 'ib.config'))\n\treturn cfg", "def load_config():\n global config\n with open('config.yml', 'r') as file:\n config = yaml.load(file)", "def load_config(config_path):\n global config\n with open(config_path) as config_file:\n config = munchify(yaml.safe_load(config_file))", "def read_config(self, config_filename):", "def load_config(self):\r\n with open('config.json', 'r') as f:\r\n self.config = json.load(f)", "def load_config():\n proj_dir = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(proj_dir, \"config.yml\")\n conf = yaml.safe_load(open(config_path))\n return conf", "def load(file):\n _config.load(file)", "def load_config(configfile=\"../data/test.cfg\"):\n\n config = configparser.ConfigParser()\n config.read([configfile])\n return config", "def load_settings(env=\"prod\"):\n global config\n config = configparser.SafeConfigParser()\n config.read(CONFIG_FILES.get(env))", "def load_configuration(configuration_file=None):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n if not 
isinstance(configuration_file, str):\n if os.path.isfile(os.getenv(\"HOME\") + \"/PATH.ini\"):\n configuration_file = os.getenv(\"HOME\") + \"/PATH.ini\"\n\n if not os.path.isfile(configuration_file):\n raise FileNotFoundError(\n \"No Configuration File 'PATH.ini' found. Please create one in your home directory \"\n \"or provide the path via the argument parsing -c.\")\n else:\n logging.info(\"Using configuration file: %s\" % configuration_file)\n\n config = configparser.ConfigParser(interpolation=ExtendedInterpolation())\n config.read(configuration_file)\n return config", "def loadConf(self):\n\n with open(self.configFile) as f:\n self.config = json.load(f)", "def load_config(self, config_file):\n self.config = ConfigParser.ConfigParser()\n self.config.read(config_file)", "def onLoadConfig(self, inifile):\n cp = ConfigParser(self.defaults)\n cp.readfp(inifile)\n depth = self.getDepth(cp)\n self.baseurl = urljoin(self.inipath, depth)\n # create child loaders for any other l10n.ini files to be included\n try:\n for title, path in cp.items('includes'):\n # skip default items\n if title in self.defaults:\n continue\n # add child config parser\n self.addChild(title, path, cp)\n except NoSectionError:\n pass\n # try to load the \"dirs\" defined in the \"compare\" section\n try:\n self.dirs.extend(cp.get('compare', 'dirs').split())\n except (NoOptionError, NoSectionError):\n pass\n # try getting a top level compare dir, as used for fennec\n try:\n self.tld = cp.get('compare', 'tld')\n # remove tld from comparison dirs\n if self.tld in self.dirs:\n self.dirs.remove(self.tld)\n except (NoOptionError, NoSectionError):\n self.tld = None\n # try to set \"all_path\" and \"all_url\"\n try:\n self.all_path = cp.get('general', 'all')\n self.all_url = urljoin(self.baseurl, self.all_path)\n except (NoOptionError, NoSectionError):\n self.all_path = None\n self.all_url = None\n return cp", "def readConfig(filepath=None):\n result = None\n if filepath is None:\n filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"res/\", \"config.ini\")\n if os.path.exists(filepath):\n config = ConfigParser.ConfigParser()\n config.read(filepath)\n result = config\n return result", "def load_config():\n\t\ttry:\n\t\t\tconf = ConfigParser()\n\n\t\t\tconfig_path = get_config_path()\n\t\t\tconf.read(config_path)\n\n\t\t\t# save references to conf, and config_path in class variables\n\t\t\tConfig.config_path = config_path\n\t\t\tConfig.conf = conf\n\n\t\t\tConfig.source_dir = conf.get('paths', 'source_dir')\n\t\t\tConfig.lyrics_dir = conf.get('paths', 'lyrics_dir')\n\n\t\t\tConfig.save_to_file = conf.getboolean('actions', 'save_to_file')\n\t\t\tConfig.save_to_tag = conf.getboolean('actions', 'save_to_tag')\n\n\t\t\tConfig.overwrite = conf.getboolean('actions', 'overwrite')\n\n\t\t\t# Load all the sources\n\t\t\tConfig.lyric_wikia = conf.getboolean('sources', 'lyric_wikia')\n\t\t\tConfig.musix_match = conf.getboolean('sources', 'musix_match')\n\t\t\tConfig.lyricsmode = conf.getboolean('sources', 'lyricsmode')\n\t\t\tConfig.az_lyrics = conf.getboolean('sources', 'az_lyrics')\n\n\t\t\t# Loading this with user config, we need to call the load_config only once at start.\n\t\t\tConfig.lyric_files_in_dir = glob2.glob(os.path.join(Config.lyrics_dir, '**/*.txt'))\n\n\n\t\t# Catch file handling errors\n\t\texcept IOError as e:\n\t\t\tprint('Unable to load config.')\n\t\t\tprint(e)", "def read_configuration (self):\n\t\tself.config.read(self._configfile)", "def load_config():\n global config\n\n with open(\"config.json\") 
as f:\n json_config = f.read()\n f.close()\n config = json.loads(json_config)", "def loadConfig():\n lines = []\n config = {}\n here = path.dirname(__file__)\n fn = path.join(here,'manatee.conf')\n try:\n with codecs.open(fn,'rU','utf-8') as conf:\n lines = conf.readlines()\n conf.close()\n except IOError as e:\n print \" Could not open configuration file: %s\" % e\n\n for line in lines:\n try:\n line = line.strip()\n if line:\n values = [x.strip() for x in line.split('=')]\n config[values[0]] = values[1]\n except Exception as e:\n print \"There was an error in the configuration file: %s\" % e\n # TODO: Any strings from the config file that might be displayed or passed into the SQL server need to be validated here.\n# config = validateConfig(config)\n return config", "def load():\n # get (or create) config path\n p = initialize()\n return load_config(open(p['config']))", "def _load_configuration(cls, config_file=None):\n config = SafeConfigParser()\n # add the defaults first\n for section, settings in CmdContext.DefaultValues.items():\n config.add_section(section)\n for option, value in settings.items():\n config.set(section, option, value)\n # read the config files\n\n config_files = []\n if config_file:\n config_files.append(config_file)\n else:\n config_files.extend(CmdContext.DefaultConfigFiles)\n\n for config_file in config_files:\n if os.access(config_file, os.F_OK | os.R_OK):\n config.read(config_file)\n return config\n\n return config", "def load(self):\n config_dict = {}\n with open(\n os.path.join(\n os.path.dirname(\n os.path.abspath(\n inspect.stack()[0][1]\n )\n ),\n \"config.txt\"), 'r') as config_file:\n for line in config_file:\n if not line.startswith('#'):\n line = line.strip().split('=', 1)\n if len(line) == 2:\n config_dict[line[0]] = line[1]\n return config_dict", "def __load_config(runtime_env):\n config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"config.ini\")\n if not os.path.exists(config_file):\n raise FileNotFoundError(config_file)\n _app_config = configparser.ConfigParser()\n _app_config.read(config_file)\n\n # Evaluate\n _app_config = _app_config[runtime_env]\n return _app_config" ]
[ "0.82460165", "0.7804471", "0.7645603", "0.76166105", "0.74599785", "0.74189556", "0.7355172", "0.7337519", "0.7330627", "0.727144", "0.7243488", "0.7229853", "0.719841", "0.71764666", "0.7172671", "0.71649605", "0.7143935", "0.71249795", "0.7120902", "0.711131", "0.7106953", "0.7094505", "0.7073841", "0.70622134", "0.70497817", "0.70448613", "0.7025158", "0.70125973", "0.70056444", "0.6992593" ]
0.8290944
0
Load configurations from config.ini and call the method to set env variables when an object is instantiated.
def __init__(self):
    self.config = load_config()
    self.set_env_var()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]", "def __init__(self, environment='develop'):\n\n cwd = path.dirname(path.abspath(__file__))\n config_dir = path.join(cwd, 'configs')\n\n config_files = []\n for (root, _, file_names) in walk(config_dir):\n for file_name in file_names:\n config_files.append(path.join(root, file_name))\n config_files = sorted(config_files)\n\n for config_file in config_files:\n config = anyconfig.load(config_file)\n for key in config:\n self[key] = config[key]\n\n if environment in config_file:\n break", "def setup_method(self, method):\n super().setup_method(method)\n\n self.CONFIG = FakeEnv.generate_data()\n\n class MyEnv(Env):\n ENVIRON = self.CONFIG\n\n self.env = MyEnv()", "def setup(self):\n file_under_test = os.path.join(os.curdir, 'application-core',\n 'app.core.config.xml')\n with open(file_under_test) as f:\n config = f.read()\n self.config = objectify.fromstring(config)", "def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = 
self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n tmp_init_exclude = self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = ''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = 
''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = ''.join(tmp_init_mask_str.strip())\n self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = 
''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. 
Exiting...')\n sys.exit(1)\n return 0", "def __init__(self, env=None, config_src=None):\n if config_src is None:\n self.config_file = DEF_CONFIG_SRC\n else:\n self.config_file = config_src\n\n with open(self.config_file, 'r') as json_config:\n self.config = json.load(json_config)\n # get list of top-level environments from config;\n # if env is not one of them, or is None,\n # default to the \"DEFAULT\" environment.\n environments = [e for e in self.config]\n if env is None or env not in environments:\n self.env = 'DEFAULT'\n else:\n self.env = env\n\n # set up logging\n log_file = self.config[self.env]['LOG_FILE']\n log_level = self.config[self.env]['LOG_LEVEL']\n logging.basicConfig(filename=log_file,\n format='%(asctime)s - %(levelname)s - %(message)s',\n level=log_level)\n logging.info(f'Config Mgr: Config set up; env: {self.env}')\n logging.debug(f'CWD: {os.getcwd()}')\n logging.debug(f'search path: {sys.path}')", "def setup_config(self, args=None):\n self.config_parse(args=args)", "def _setup(self):\n # Look for ini file\n if not os.path.isfile(self.ini_file):\n self._fail('Cannot find ini file')\n\n self._setup_logging()\n\n # Import debexpo root directory\n sys.path.append(os.path.dirname(self.ini_file))\n\n # Initialize Pylons app\n conf = appconfig('config:' + self.ini_file)\n pylons.config = load_environment(conf.global_conf, conf.local_conf)\n\n # Change into the incoming directory\n incoming_dir = pylons.config['debexpo.upload.incoming']\n logging.info(\"Changing dir to %s\", incoming_dir)\n os.chdir(incoming_dir)\n\n # Look for the changes file\n if not os.path.isfile(self.changes_file):\n self._fail('Cannot find changes file')", "def init():\n try:\n config = configparser.ConfigParser()\n # look for username.config on both Windows (USERNAME) and Linux (USER)\n if os.name == \"nt\":\n username = os.environ['USERNAME']\n else:\n username = os.environ['USER']\n config_file = username + \".config\"\n if not os.path.isfile(config_file):\n logging.error(\"Configuration file \" + config_file + \" not found.\")\n sys.exit()\n config.read(config_file)\n # database\n global DB_HOST, DB_PORT, DB_NAME, DB_USER, DB_PASSWORD\n DB_HOST = config[\"DATABASE\"][\"db_host\"] if (\"db_host\" in config[\"DATABASE\"]) else None\n DB_PORT = config[\"DATABASE\"][\"db_port\"]\n DB_NAME = config[\"DATABASE\"][\"db_name\"]\n DB_USER = config[\"DATABASE\"][\"db_user\"]\n DB_PASSWORD = config[\"DATABASE\"][\"db_password\"]\n except Exception:\n logger.exception(\"Failed to read config file properly\")\n raise", "def __init__(self, environment=None):\n if environment is None:\n environment = os.environ.get(\"SENTERA_ENV\") or \"prod\"\n environment = environment.lower()\n self.environment = environment\n\n if self.environment == \"prod\":\n self.config = {\n \"sentera_api_url\": \"https://api.sentera.com\",\n \"weather_api_url\": \"https://weather.sentera.com\",\n }\n else:\n self.config = {\n \"sentera_api_url\": f\"https://api{self.environment}.sentera.com\",\n \"weather_api_url\": f\"https://weather{self.environment}.sentera.com\",\n }\n\n if ENV_SENTERA_API_URL in os.environ:\n self.config[\"sentera_api_url\"] = os.environ.get(ENV_SENTERA_API_URL)\n\n if ENV_WEATHER_API_URL in os.environ:\n self.config[\"weather_api_url\"] = os.environ.get(ENV_WEATHER_API_URL)", "def __init__(self):\n\n self.path = os.path.dirname(os.path.realpath(__file__)) + '/config.ini'\n self.config = configparser.ConfigParser()\n self.config.read(self.path)", "def set_env_config(self):\n self.env_config = {\n # ===== STANDARD 
ARGUMENTS ======\n \"n_agents\": 4, # Number of non-planner agents\n \"world_size\": [15, 15], # [Height, Width] of the env world\n \"episode_length\": 1000, # Number of time-steps per episode\n # In multi-action-mode, the policy selects an action for each action\n # subspace (defined in component code)\n # Otherwise, the policy selects only 1 action\n \"multi_action_mode_agents\": False,\n \"multi_action_mode_planner\": True,\n # When flattening observations, concatenate scalar & vector observations\n # before output\n # Otherwise, return observations with minimal processing\n \"flatten_observations\": False,\n # When Flattening masks, concatenate each action subspace mask\n # into a single array\n # Note: flatten_masks = True is recommended for masking action logits\n \"flatten_masks\": True,\n # ===== COMPONENTS =====\n # Which components to use\n \"components\": [\n # (1) Building houses\n {\"Build\": {}},\n # (2) Trading collectible resources\n {\"ContinuousDoubleAuction\": {\"max_num_orders\": 5}},\n # (3) Movement and resource collection\n {\"Gather\": {}},\n ],\n # ===== SCENARIO =====\n # Which scenario class to use\n \"scenario_name\": \"uniform/simple_wood_and_stone\",\n # (optional) kwargs of the chosen scenario class\n \"starting_agent_coin\": 10,\n \"starting_stone_coverage\": 0.10,\n \"starting_wood_coverage\": 0.10,\n }\n\n # Create an environment instance from the config\n self.env = foundation.make_env_instance(**self.env_config)", "def __init__(self):\n load_dotenv('.env')\n self.NEWS_API_KEY = os.getenv('NEWS_API_KEY')", "def __init__(self):\n ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>\\S+)\\s+(?P<vi>[=])\\s+(?P<value>.*)$')\n self.CONFIG = ConfigParser.ConfigParser()\n self.CONFIG_FILENAME = os.path.splitext(os.path.abspath(__file__))[0]+'.ini'\n self.CONFIG_USER_FILENAME = re.sub(r'\\.ini$', '.user.ini', self.CONFIG_FILENAME)\n self.CONFIG.read([self.CONFIG_FILENAME, self.CONFIG_USER_FILENAME])\n\n for key, value in os.environ.items():\n m = re.match(r'^%s([A-Z]+)_([A-Z\\_\\-]+)$' % self.ENV_CONFIG_PREFIX, key)\n if m:\n self.CONFIG.set(m.group(1).lower(), m.group(2).lower(), value)\n\n self.LISTEN_IP = self.CONFIG.get('listen', 'ip')\n self.LISTEN_PORT = self.CONFIG.getint('listen', 'port')\n self.LISTEN_USERNAME = self.CONFIG.get('listen', 'username') if self.CONFIG.has_option('listen', 'username') else ''\n self.LISTEN_PASSWORD = self.CONFIG.get('listen', 'password') if self.CONFIG.has_option('listen', 'password') else ''\n self.LISTEN_VISIBLE = self.CONFIG.getint('listen', 'visible')\n self.LISTEN_DEBUGINFO = self.CONFIG.getint('listen', 'debuginfo')\n\n self.GAE_ENABLE = self.CONFIG.getint('gae', 'enable')\n self.GAE_APPIDS = re.findall(r'[\\w\\-\\.]+', self.CONFIG.get('gae', 'appid').replace('.appspot.com', ''))\n self.GAE_PASSWORD = self.CONFIG.get('gae', 'password').strip()\n self.GAE_PATH = self.CONFIG.get('gae', 'path')\n self.GAE_MODE = self.CONFIG.get('gae', 'mode')\n self.GAE_IPV6 = self.CONFIG.getint('gae', 'ipv6')\n self.GAE_WINDOW = self.CONFIG.getint('gae', 'window')\n self.GAE_KEEPALIVE = self.CONFIG.getint('gae', 'keepalive')\n self.GAE_CACHESOCK = self.CONFIG.getint('gae', 'cachesock')\n self.GAE_HEADFIRST = self.CONFIG.getint('gae', 'headfirst')\n self.GAE_OBFUSCATE = self.CONFIG.getint('gae', 'obfuscate')\n self.GAE_VALIDATE = self.CONFIG.getint('gae', 'validate')\n self.GAE_TRANSPORT = self.CONFIG.getint('gae', 'transport') if self.CONFIG.has_option('gae', 'transport') else 0\n self.GAE_OPTIONS = self.CONFIG.get('gae', 
'options')\n self.GAE_REGIONS = set(x.upper() for x in self.CONFIG.get('gae', 'regions').split('|') if x.strip())\n self.GAE_SSLVERSION = self.CONFIG.get('gae', 'sslversion')\n self.GAE_PAGESPEED = self.CONFIG.getint('gae', 'pagespeed') if self.CONFIG.has_option('gae', 'pagespeed') else 0\n\n if self.GAE_IPV6:\n sock = None\n try:\n sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)\n sock.connect(('2001:4860:4860::8888', 53))\n logging.info('use ipv6 interface %s for gae', sock.getsockname()[0])\n except Exception as e:\n logging.info('Fail try use ipv6 %r, fallback ipv4', e)\n self.GAE_IPV6 = 0\n finally:\n if sock:\n sock.close()\n\n if 'USERDNSDOMAIN' in os.environ and re.match(r'^\\w+\\.\\w+$', os.environ['USERDNSDOMAIN']):\n self.CONFIG.set('profile', '.' + os.environ['USERDNSDOMAIN'], 'direct')\n\n withgae_sites = []\n withphp_sites = []\n crlf_sites = []\n nocrlf_sites = []\n forcehttps_sites = []\n noforcehttps_sites = []\n fakehttps_sites = []\n nofakehttps_sites = []\n dns_servers = []\n urlrewrite_map = collections.OrderedDict()\n rule_map = collections.OrderedDict()\n\n for pattern, rule in self.CONFIG.items('profile'):\n rules = [x.strip() for x in re.split(r'[,\\|]', rule) if x.strip()]\n if rule.startswith(('file://', 'http://', 'https://')) or '$1' in rule:\n urlrewrite_map[pattern] = rule\n continue\n for rule, sites in [('withgae', withgae_sites),\n ('withphp', withphp_sites),\n ('crlf', crlf_sites),\n ('nocrlf', nocrlf_sites),\n ('forcehttps', forcehttps_sites),\n ('noforcehttps', noforcehttps_sites),\n ('fakehttps', fakehttps_sites),\n ('nofakehttps', nofakehttps_sites)]:\n if rule in rules:\n sites.append(pattern)\n rules.remove(rule)\n if rules:\n rule_map[pattern] = rules[0]\n\n self.HTTP_DNS = dns_servers\n self.WITHGAE_SITES = tuple(withgae_sites)\n self.WITHPHP_SITES = tuple(withphp_sites)\n self.CRLF_SITES = tuple(crlf_sites)\n self.NOCRLF_SITES = set(nocrlf_sites)\n self.FORCEHTTPS_SITES = tuple(forcehttps_sites)\n self.NOFORCEHTTPS_SITES = set(noforcehttps_sites)\n self.FAKEHTTPS_SITES = tuple(fakehttps_sites)\n self.NOFAKEHTTPS_SITES = set(nofakehttps_sites)\n self.URLREWRITE_MAP = urlrewrite_map\n self.RULE_MAP = rule_map\n\n self.IPLIST_ALIAS = collections.OrderedDict((k, v.split('|') if v else []) for k, v in self.CONFIG.items('iplist'))\n self.IPLIST_PREDEFINED = [x for x in sum(self.IPLIST_ALIAS.values(), []) if re.match(r'^\\d+\\.\\d+\\.\\d+\\.\\d+$', x) or ':' in x]\n\n if self.GAE_IPV6 and 'google_ipv6' in self.IPLIST_ALIAS:\n for name in self.IPLIST_ALIAS.keys():\n if name.startswith('google') and name not in ('google_ipv6', 'google_talk'):\n self.IPLIST_ALIAS[name] = self.IPLIST_ALIAS['google_ipv6']\n\n self.PAC_ENABLE = self.CONFIG.getint('pac', 'enable')\n self.PAC_IP = self.CONFIG.get('pac', 'ip')\n self.PAC_PORT = self.CONFIG.getint('pac', 'port')\n self.PAC_FILE = self.CONFIG.get('pac', 'file').lstrip('/')\n self.PAC_GFWLIST = self.CONFIG.get('pac', 'gfwlist')\n self.PAC_ADBLOCK = self.CONFIG.get('pac', 'adblock')\n self.PAC_ADMODE = self.CONFIG.getint('pac', 'admode')\n self.PAC_EXPIRED = self.CONFIG.getint('pac', 'expired')\n\n self.PHP_ENABLE = self.CONFIG.getint('php', 'enable')\n self.PHP_LISTEN = self.CONFIG.get('php', 'listen')\n self.PHP_PASSWORD = self.CONFIG.get('php', 'password') if self.CONFIG.has_option('php', 'password') else ''\n self.PHP_CRLF = self.CONFIG.getint('php', 'crlf') if self.CONFIG.has_option('php', 'crlf') else 1\n self.PHP_VALIDATE = self.CONFIG.getint('php', 'validate') if self.CONFIG.has_option('php', 
'validate') else 0\n self.PHP_KEEPALIVE = self.CONFIG.getint('php', 'keepalive')\n self.PHP_FETCHSERVER = self.CONFIG.get('php', 'fetchserver')\n self.PHP_HOSTS = self.CONFIG.get('php', 'hosts').split('|') if self.CONFIG.get('php', 'hosts') else []\n\n self.VPS_ENABLE = self.CONFIG.getint('vps', 'enable')\n self.VPS_LISTEN = self.CONFIG.get('vps', 'listen')\n self.VPS_FETCHSERVER = self.CONFIG.get('vps', 'fetchserver')\n\n self.PROXY_ENABLE = self.CONFIG.getint('proxy', 'enable')\n self.PROXY_AUTODETECT = self.CONFIG.getint('proxy', 'autodetect') if self.CONFIG.has_option('proxy', 'autodetect') else 0\n self.PROXY_HOST = self.CONFIG.get('proxy', 'host')\n self.PROXY_PORT = self.CONFIG.getint('proxy', 'port')\n self.PROXY_USERNAME = self.CONFIG.get('proxy', 'username')\n self.PROXY_PASSWROD = self.CONFIG.get('proxy', 'password')\n\n if not self.PROXY_ENABLE and self.PROXY_AUTODETECT:\n system_proxy = ProxyUtil.get_system_proxy()\n if system_proxy and self.LISTEN_IP not in system_proxy:\n _, username, password, address = ProxyUtil.parse_proxy(system_proxy)\n proxyhost, _, proxyport = address.rpartition(':')\n self.PROXY_ENABLE = 1\n self.PROXY_USERNAME = username\n self.PROXY_PASSWROD = password\n self.PROXY_HOST = proxyhost\n self.PROXY_PORT = int(proxyport)\n if self.PROXY_ENABLE:\n self.GAE_MODE = 'https'\n\n self.AUTORANGE_HOSTS = self.CONFIG.get('autorange', 'hosts').split('|')\n self.AUTORANGE_ENDSWITH = tuple(self.CONFIG.get('autorange', 'endswith').split('|'))\n self.AUTORANGE_NOENDSWITH = tuple(self.CONFIG.get('autorange', 'noendswith').split('|'))\n self.AUTORANGE_MAXSIZE = self.CONFIG.getint('autorange', 'maxsize')\n self.AUTORANGE_WAITSIZE = self.CONFIG.getint('autorange', 'waitsize')\n self.AUTORANGE_BUFSIZE = self.CONFIG.getint('autorange', 'bufsize')\n self.AUTORANGE_THREADS = self.CONFIG.getint('autorange', 'threads')\n\n self.FETCHMAX_LOCAL = self.CONFIG.getint('fetchmax', 'local') if self.CONFIG.get('fetchmax', 'local') else 3\n self.FETCHMAX_SERVER = self.CONFIG.get('fetchmax', 'server')\n\n self.DNS_ENABLE = self.CONFIG.getint('dns', 'enable')\n self.DNS_LISTEN = self.CONFIG.get('dns', 'listen')\n self.DNS_SERVERS = self.HTTP_DNS or self.CONFIG.get('dns', 'servers').split('|')\n self.DNS_BLACKLIST = set(self.CONFIG.get('dns', 'blacklist').split('|'))\n self.DNS_TCPOVER = tuple(self.CONFIG.get('dns', 'tcpover').split('|')) if self.CONFIG.get('dns', 'tcpover').strip() else tuple()\n if self.GAE_IPV6:\n self.DNS_SERVERS = [x for x in self.DNS_SERVERS if ':' in x]\n else:\n self.DNS_SERVERS = [x for x in self.DNS_SERVERS if ':' not in x]\n\n self.USERAGENT_ENABLE = self.CONFIG.getint('useragent', 'enable')\n self.USERAGENT_STRING = self.CONFIG.get('useragent', 'string')\n\n self.LOVE_ENABLE = self.CONFIG.getint('love', 'enable')\n self.LOVE_TIP = self.CONFIG.get('love', 'tip').encode('utf8').decode('unicode-escape').split('|')", "def __init__(self, settings):\n self._read_config(settings)", "def __init__(self, ini_file):\n self.config = configparser.ConfigParser()\n self.config.read(ini_file)", "def __init__(self):\n cfg = ConfigParser.ConfigParser()\n\n if sys.executable == sys.argv[0]: # Windows binary\n self.VISIONEGG_SYSTEM_DIR = os.curdir\n self.VISIONEGG_USER_DIR = os.curdir\n else:\n # non-standard VisionEgg installations\n try:\n self.VISIONEGG_SYSTEM_DIR = os.environ['VISIONEGG_SYSTEM_DIR']\n except KeyError:\n self.VISIONEGG_SYSTEM_DIR = os.path.split(__file__)[0]\n user_dir = os.path.expanduser(\"~\")\n self.VISIONEGG_USER_DIR = 
os.path.join(user_dir,\"VisionEgg\")\n\n # See if there's an environment variable for the config file\n if 'VISIONEGG_CONFIG_FILE' in os.environ.keys():\n configFile = os.environ['VISIONEGG_CONFIG_FILE']\n else:\n # Is there one in VISIONEGG_USER_DIR?\n configFile = os.path.join(self.VISIONEGG_USER_DIR,\"VisionEgg.cfg\")\n if not os.path.isfile(configFile):\n configFile = os.path.join(self.VISIONEGG_SYSTEM_DIR,\"VisionEgg.cfg\")\n if not os.path.isfile(configFile):\n configFile = None # No file, use defaults specified in environment variables then here\n\n if configFile:\n cfg.read(configFile)\n else:\n # pretend we have a config file\n cfg.add_section('General')\n for key in defaults.keys():\n cfg.set('General',key,str(defaults[key]))\n if sys.platform == 'darwin':\n cfg.add_section('darwin')\n for key in extra_darwin_defaults.keys():\n cfg.set('darwin',key,str(extra_darwin_defaults[key]))\n\n # Do the general stuff first\n # Set the default values\n for name in defaults.keys():\n if name in os.environ.keys():\n value = os.environ[name]\n else:\n value = defaults[name]\n if isinstance(defaults[name], int):\n\t\tif value == 'False':\n\t\t value = 0\n\t\telif value == 'True':\n\t\t value = 1\n setattr(self,name,int(value))\n elif isinstance(defaults[name], float):\n setattr(self,name,float(value))\n else:\n setattr(self,name,value)\n\n # Get the values from the configFile\n general_options = cfg.options('General')\n\n self._delayed_configuration_log_warnings = [] # chick and egg problem\n # set defaults from config file\n for option in general_options:\n name = option.upper()\n if name not in defaults.keys():\n self._delayed_configuration_log_warnings.append(\n \"While reading %s: The variable \\\"%s\\\" is not (anymore) a Vision Egg variable.\"%(os.path.abspath(configFile),option))\n continue\n value = cfg.get('General',option)\n if name in os.environ.keys():\n value = os.environ[name]\n if isinstance(defaults[name], int):\n\t\tif value == 'False':\n\t\t value = 0\n\t\telif value == 'True':\n\t\t value = 1\n setattr(self,name,int(value))\n elif isinstance(defaults[name], float):\n setattr(self,name,float(value))\n else:\n setattr(self,name,value)\n\n # Do platform specific stuff\n # Set the default values\n platform_name = sys.platform\n extra_name = \"extra_%s_defaults\"%(platform_name,)\n if extra_name in globals().keys():\n extra_defaults = globals()[extra_name]\n for name in extra_defaults.keys():\n setattr(self,name,extra_defaults[name])\n\n # Get the values from the configFile\n platform_options = cfg.options(platform_name)\n for option in platform_options:\n name = option.upper()\n if name not in extra_defaults.keys():\n raise KeyError(\"No Vision Egg configuration variable \\\"%s\\\"\"%option)\n value = cfg.get(platform_name,option)\n if name in os.environ.keys():\n value = os.environ[name]\n if isinstance(extra_defaults[name], int):\n\t\t if value == 'False':\n\t\t value = 0\n \t\t elif value == 'True':\n\t\t value = 1\n setattr(self,name,int(value))\n elif isinstance(extra_defaults[name], float):\n setattr(self,name,float(value))\n else:\n setattr(self,name,value)\n\n if(configFile):\n self.VISIONEGG_CONFIG_FILE = os.path.abspath(configFile)\n else:\n self.VISIONEGG_CONFIG_FILE = None", "def init_settings(self):\n self.app.config.setdefault('SIMPLE_DOMAINS', [])\n self.app.config.setdefault('AWS_ACCESS_KEY_ID', environ.get('AWS_ACCESS_KEY_ID'))\n self.app.config.setdefault('AWS_SECRET_ACCESS_KEY', environ.get('AWS_SECRET_ACCESS_KEY'))\n self.app.config.setdefault('AWS_REGION', 
environ.get('AWS_REGION', self.DEFAULT_REGION))", "def __init__(self, config):\n err_msg = \".puppeteer.yml must have a list of environments. Please see setup details at {0}.\".format(\n PROJECT_URL)\n try:\n self.envs = config.get('environments')\n if self.envs is None or not isinstance(self.envs, list):\n raise ControlRepoError(err_msg)\n except (TypeError, AttributeError):\n raise ControlRepoError(err_msg)\n\n self.inventory_file = config.get('inventory_file', 'inventory.ini')\n self.repo_file = REPO_FILE\n self.env_dir = 'environments'\n self.group_dir = 'group_vars'\n self.host_dir = 'host_vars'\n self.roles_dir = 'roles'", "def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)", "def settings_init(self):\n config_console = configparser.ConfigParser()\n config_console.read(CONFIG_FILE_NAME)\n self.logmode = config_console[\"LOG\"][\"log_mode\"]", "def setup_config():\n global config\n config = modConfig.Config(cmdline.config)", "def initialize_from_config(self):", "def setup_config():\n\n config = configparser.ConfigParser()\n config.read(CONFIG_PATH)\n\n return config", "def load_environment(self, env):\n self.env = env", "def setup_method(self, method):\n super().setup_method(method)\n\n Env.ENVIRON = {}\n self.env.read_env(\n Path(__file__, is_file=True)('test_env.txt'),\n PATH_VAR=Path(__file__, is_file=True).__root__\n )", "def __init__(self) -> None:\n self._settings = {}\n\n # Load values from global_settings (only uppercase)\n self.filter_and_set(global_settings)\n\n settings_env_value: str = os.environ.get(SETTINGS_ENV)\n if settings_env_value:\n # Load values from custom settings\n try:\n module = importlib.import_module(settings_env_value)\n except ModuleNotFoundError:\n msg = \"Can't import custom settings. Is it under PYTHONPATH?\"\n raise ModuleError(msg)\n self.filter_and_set(module)", "def __init__(self, config_file_name=\"config.json\"):\n with open(config_file_name, \"r\") as config:\n f = dict(json.load(config))\n for key, value in f.items():\n setattr(self, key, value)", "def __init__(self, config_file, verbose):\r\n self.loadConfig(config_file)\r\n self.verbose = verbose", "def load_configuration(app, environment):\n environment_configuration = ('config/settings_%s.py') % (environment)\n\n app.config.from_object(__name__)\n app.config.from_pyfile('config/settings_default.py')\n app.config.from_pyfile(environment_configuration)" ]
[ "0.73404276", "0.6964214", "0.68908703", "0.6842718", "0.66873354", "0.6576955", "0.6550852", "0.6545068", "0.6535788", "0.6514443", "0.6433528", "0.64094526", "0.63856107", "0.6378766", "0.63634384", "0.6355668", "0.634891", "0.63091236", "0.62860405", "0.6229751", "0.6224098", "0.62167376", "0.6201208", "0.619943", "0.61936986", "0.6185888", "0.6176377", "0.6168531", "0.6164657", "0.61576915" ]
0.79680717
0
Load corenlp properties for coreference resolution.
def corenlp_coref_props(self):
    coref_props = self.config._sections['corenlp_coref_props']
    return coref_props
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self):\n for prop in self.properties:\n try:\n value = getattr(self, prop)\n self._prop_dict[prop] = value\n except AttributeError as ate:\n pass", "def initProperties(self):\n self.setFoldComments(Preferences.getEditor(\"CssFoldComment\"))\n self.setFoldCompact(Preferences.getEditor(\"AllFoldCompact\"))\n try:\n self.setHSSLanguage(\n Preferences.getEditor(\"CssHssSupport\"))\n self.setLessLanguage(\n Preferences.getEditor(\"CssLessSupport\"))\n self.setSCSSLanguage(\n Preferences.getEditor(\"CssSassySupport\"))\n except AttributeError:\n pass", "def _load_core_factor():\n coreFactor = session.get('coreFactor')\n\n if coreFactor is None:\n g.coreFactor = None\n else:\n g.coreFactor = coreFactor", "def parse_and_evaluate_corenlp_coref(input_dir = 'CoreNLP_coref_anno/dev', gold_annotations_folder = '../../../data/baseline/dev'):\n\t\n\tscores = []\n\t\n\tfor file in os.listdir(input_dir):\n\t\tif re.match(r'(.+)\\.xml', file)!= None:\n\t\t\tclusters = []\n\t\t\tokr_graph = load_graph_from_file(gold_annotations_folder + '/'+ re.match(r'(.+)\\.xml', file).group(1)[:-4]+'.xml')\n\t\t\ttree = ET.parse(input_dir + '/' + file)\n\t\t\tdocument = tree.getroot()[0]\n\t\t\tcoref_node = document.find('coreference')\n\t\t\t\n\t\t\tfor coref_id, coref_chain in enumerate(coref_node):\n\t\t\t\tcluster = []\n\t\t\t\tfor mention in coref_chain:\n\t\t\t\t\tsent_num = int(mention[0].text)\n\t\t\t\t\tstart = int(mention[1].text)-1\n\t\t\t\t\tend = int(mention[2].text)-1\n\t\t\t\t\tindices = range(start,end)\n\t\t\t\t\ttext = mention[4].text\n\t\t\t\t\tmention_string = str(sent_num)+ str(indices)\n\t\t\t\t\tcluster.append((mention_string, text))\n\t\t\t\tclusters.append(cluster)\n\t\t\tclusters = [set([item[0] for item in cluster]) for cluster in clusters]\n\t\t\n\t\t\t# gold_mentions = [set(map(str, entity.mentions.values())) for entity in okr_graph.entities.values()]\n\t\t\t# print('\\n')\n\t\t\t# print('Gold Mentions:', gold_mentions)\n\n\t\t\t# print('*********')\n\t\t\t# print('*********')\n\t\t\tcurr_scores = eval_clusters(clusters, okr_graph)\n\t\t\tscores.append(curr_scores)\n\n\tprint(scores)\t\t\n\tscores = np.mean(scores, axis=0).tolist() \n\tprint(scores)", "def load_properties(self, meta):\n\t\t# doctype properties\n\t\tfor prop in doctype_properties:\n\t\t\tself.set(prop, meta.get(prop))\n\n\t\tfor d in meta.get(\"fields\"):\n\t\t\tnew_d = {\n\t\t\t\t\"fieldname\": d.fieldname,\n\t\t\t\t\"is_custom_field\": d.get(\"is_custom_field\"),\n\t\t\t\t\"is_system_generated\": d.get(\"is_system_generated\"),\n\t\t\t\t\"name\": d.name,\n\t\t\t}\n\t\t\tfor prop in docfield_properties:\n\t\t\t\tnew_d[prop] = d.get(prop)\n\t\t\tself.append(\"fields\", new_d)\n\n\t\tfor fieldname in (\"links\", \"actions\", \"states\"):\n\t\t\tfor d in meta.get(fieldname):\n\t\t\t\tself.append(fieldname, d)", "def corpus_properties(dataset):\n print '> Reading data..', dataset\n corpus_path = '../data/'+dataset+'_dependencies'\n (documents, labels) = data.read_files(corpus_path)\n\n props = {}\n #~ giant = nx.DiGraph()\n print '> Building networks..'\n for i, deps in enumerate(documents):\n if i%10==0: print ' ',str(i)+'/'+str(len(documents))\n g = graph_representation.construct_cooccurrence_network(deps)\n #~ giant.add_edges_from(g.edges())\n p = graph.network_properties(g)\n for k,v in p.iteritems():\n if i==0: props[k] = []\n props[k].append(v)\n g = None # just to make sure..\n\n print '> Calculating means and deviations..'\n props_total = {}\n for key in props:\n print ' ',key\n props_total[key+'_mean'] = 
numpy.mean(props[key])\n props_total[key+'_std'] = numpy.std(props[key])\n\n data_name = dataset.replace('/','.')\n #~ data.pickle_to_file(giant, 'output/properties/cooccurrence/giant_'+data_name)\n data.pickle_to_file(props, 'output/properties/dependency/stats_'+data_name)\n data.pickle_to_file(props_total, 'output/properties/dependency/stats_tot_'+data_name)", "def get_load_references(self):\n for path, properties in self.load.items():\n yield CaseLoadReference(path=path, properties=list(properties))", "def LoadSubversionAutoProperties():\r\n if os.name == 'nt':\r\n subversion_config = os.environ.get(\"APPDATA\") + \"\\\\Subversion\\\\config\"\r\n else:\r\n subversion_config = os.path.expanduser(\"~/.subversion/config\")\r\n if not os.path.exists(subversion_config):\r\n return {}\r\n config = ConfigParser.ConfigParser()\r\n config.read(subversion_config)\r\n if (config.has_section(\"miscellany\") and\r\n config.has_option(\"miscellany\", \"enable-auto-props\") and\r\n config.getboolean(\"miscellany\", \"enable-auto-props\") and\r\n config.has_section(\"auto-props\")):\r\n props = {}\r\n for file_pattern in config.options(\"auto-props\"):\r\n props[file_pattern] = ParseSubversionPropertyValues(\r\n config.get(\"auto-props\", file_pattern))\r\n return props\r\n else:\r\n return {}", "def parse_corenlp_coref_xml_doc(input_dir = 'CoreNLP_coref_anno/dev'):\n\n\tmentions = []\n\tfor file in os.listdir(input_dir):\n\t\ttree = ET.parse(input_dir + '/' + file)\n\t\tdocument = tree.getroot()[0]\n\t\t# sentences_node = document.find('sentences')\n\n\t\t# for sentence in enumerate(sentences_node):\n\t\t# \ts_num = sentence.attribs['id']\n\t\t# \tsentence_text = \" \".join([token.word for token in sentence.find('tokens')])\n\t\t# \tsentences[s_num] = sentence_text\n\n\t\tcoref_node = document.find('coreference')\n\t\t\n\t\tfor coref_id, coref_chain in enumerate(coref_node):\n\t\t\tfor mention in cluster:\n\t\t\t\tsent_num = int(mention[0].text)\n\t\t\t\tstart = int(mention[1].text)-1\n\t\t\t\tend = int(mention[2].text)-1\n\t\t\t\ttext = mention[4].text\n\t\t\t\tmentions.append({\"filename\":file, \"s_num\":sent_num,\"EP\":\"E\", \"indices\":range(start, end),\"coref\":coref_id+1})\n\n\tmentions.sort(key=lambda x:(x[\"filename\"],x[\"s_num\"],x[\"indices\"][0]))\n\twith open('coref_output.txt', 'w') as out_file:\n\t\tout_file.write(\"file\\tsentence\\tentity(E) or predicate(P)\\t coref chain\\tindices\\t\\n\")\n\t\tout_file.write(\"\\n\".join([e[\"filename\"]+\"\\t\"+str(e[\"s_num\"])+\"\\t\"+e[\"EP\"]+\"\\t\"+str(e[\"coref\"])+\"\\t\"+str(e[\"indices\"])[1:-1] for e in mentions]))", "def load_cnns(self):\n self.cnn1 = cnn_utils.CNN()\n self.cnn1.load_state_dict(torch.load(f'{self.model_dir}/model1.pt'))\n self.cnn1.eval()\n self.cnn2 = cnn_utils.CNN()\n self.cnn2.load_state_dict(torch.load(f'{self.model_dir}/model2.pt'))\n self.cnn2.eval()", "def parse_and_analyse_corenlp_coref(input_dir = 'CoreNLP_coref_anno/dev', gold_annotations_folder = '../../../data/baseline/dev'):\n\tmentions = []\n\n\n\twith open('coref_analyse_output.txt', 'w') as out_file:\n\n\t\tfor file_name in os.listdir(input_dir):\n\t\t\tif re.match(r'(.+)\\.xml', file_name)!= None:\n\t\t\t\tokr_graph = load_graph_from_file(gold_annotations_folder + '/'+ re.match(r'(.+)\\.xml', file_name).group(1)[:-4]+'.xml')\n\n\t\t\t\ttree = ET.parse(input_dir + '/' + file_name)\n\t\t\t\tdocument = tree.getroot()[0]\n\t\t\t\tsentence_wise_predicted_mentions = defaultdict(list)\n\t\t\t\tsentence_wise_gold_mentions = 
defaultdict(list)\n\t\t\t\tpredicted_coref_dict = defaultdict(list)\n\t\t\t\tgold_coref_dict = defaultdict(list)\n\n\t\t\t\tcoref_node = document.find('coreference')\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tfor coref_id, coref_chain in enumerate(coref_node):\n\t\t\t\t\tfor mention in coref_chain:\n\t\t\t\t\t\tsent_num = int(mention[0].text)\n\t\t\t\t\t\tstart = int(mention[1].text)-1\n\t\t\t\t\t\tend = int(mention[2].text)-1\n\t\t\t\t\t\ttext = mention[4].text\n\t\t\t\t\t\tsentence_wise_predicted_mentions[sent_num].append({\"indices\":range(start, end),\"coref\":coref_id+1, \"text\":text})\n\t\t\t\t\t\tpredicted_coref_dict[coref_id+1].append({\"indices\":range(start, end), \"s_num\":sent_num, \"text\":text })\n\n\n\t\t\t\t\n\t\t\t\t\t\t\t\t\n\t\t\t\tfor entity in okr_graph.entities.values():\n\t\t\t\t\tfor mention in entity.mentions.values():\n\t\t\t\t\t\tsentence_wise_gold_mentions[mention.sentence_id].append({\"indices\":mention.indices,\"coref\":entity.id, 'text':mention.terms})\n\n\t\t\t\tprint'###'+ file_name + '\\n'\t\n\t\t\t\tfor sentence_id, sentence in enumerate(okr_graph.sentences.values()):\n\t\t\t\t\tprint 'Sentence: ', ' '.join(sentence) \n\t\t\t\t\tprint 'Predicted entities: ', [element['text'] for element in sentence_wise_predicted_mentions[sentence_id+1]]\n\t\t\t\t\tprint 'Gold entities: ', [element['text'] for element in sentence_wise_gold_mentions[sentence_id+1]]\n\t\t\t\t\tprint ' '\n\t\t\t\n\t\t\t\tprint \"Not printing singletons\"\n\t\t\t\tprint('\\nThe predicted clusters: ')\n\t\t\t\tfor cluster_id, cluster in enumerate(predicted_coref_dict.values()):\n\t\t\t\t\tprint('Cluster id: ', cluster_id +1)\n\t\t\t\t\tprint([[okr_graph.sentences[mention['s_num']][index] for index in mention['indices']]for mention in predicted_coref_dict[cluster_id+1]] )\n\n\t\t\t\tprint('\\n The Gold clusters:')\t\n\t\t\t\tfor entity in okr_graph.entities.values():\n\t\t\t\t\tprint('cluster_id: ', entity.id )\n\t\t\t\t\tprint([mention.terms for mention in entity.mentions.values()])\n\n\t\t\t\tprint '**********'", "def get_crops_properties() -> dict:\n with open(CROPS_PROPERTIES, \"r\", encoding=\"utf-8\") as stream:\n crop_props = yaml.safe_load(stream)\n\n return crop_props", "def get_properties():", "def initializeCoreNLP(language='english'):\n\tacceptedLanguages = ['english','arabic','chinese','french','german','spanish']\n\tassert language in acceptedLanguages\n\n\tif testCoreNLPConnection():\n\t\treturn\n\n\tif hasOldCoreNLP():\n\t\traise RuntimeError(\"Kindred needs a newer version of CoreNLP. Please use kindred.downloadCoreNLP() to upgrade to the latest version (and clear out the old version)\")\n\n\tcorenlpDir = kindred.utils._findDir(currentCoreNLPInfo['directory'],downloadDirectory)\n\tif corenlpDir is None:\n\t\traise RuntimeError(\"Unable to find local server so trying to initialize CoreNLP instance. Could not find the Stanford CoreNLP files. Use kindred.downloadCoreNLP() first if subprocess should be used.\")\n\n\tif language != 'english' and not coreNLPLanguageFileExists(language):\n\t\traise RuntimeError(\"Could not find the Stanford CoreNLP model files for language: %s. 
Use kindred.downloadCoreNLPLanguage('%s') first.\" % (language,language))\n\n\tif language == 'english':\n\t\tcommand='java -mx4g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 150000 -quiet true'\n\telse:\n\t\tcommand='java -mx4g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -serverProperties StanfordCoreNLP-%s.properties -port 9000 -timeout 150000 -quiet true' % language\n\n\tdownloadDirectoryLock = os.path.join(downloadDirectory,'lock')\n\n\ttrackingDir = os.path.join(downloadDirectory,'tracking')\n\twith fasteners.InterProcessLock(downloadDirectoryLock):\n\t\tif not os.path.isdir(trackingDir):\n\t\t\tos.makedirs(trackingDir)\n\n\tstartLock = os.path.join(trackingDir,socket.gethostname()+'.start.lock')\n\tcorenlpPIDFile = os.path.join(trackingDir,socket.gethostname()+'.corenlp.pid')\n\twith fasteners.InterProcessLock(startLock):\n\t\tif testCoreNLPConnection():\n\t\t\treturn\n\n\t\tkindredPIDsFileLock = os.path.join(trackingDir,socket.gethostname()+'.kindred.pid.locks')\n\t\tkindredPIDsFile = os.path.join(trackingDir,socket.gethostname()+'.kindred.pid')\n\t\twith fasteners.InterProcessLock(kindredPIDsFileLock):\n\t\t\twith open(kindredPIDsFile,'w') as outF:\n\t\t\t\toutF.write(\"%d\\n\" % os.getpid())\n\n\t\tos.chdir(corenlpDir)\n\n\t\tcorenlpProcess = subprocess.Popen(shlex.split(command), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, cwd=corenlpDir, preexec_fn=os.setpgrp)\n\t\twith open(corenlpPIDFile,'w') as f:\n\t\t\tf.write(\"%d\\n\" % corenlpProcess.pid)\n\n\t\tmaxTries = 10\n\n\t\tconnectionSuccess = False\n\t\tfor tries in range(maxTries):\n\t\t\tif testCoreNLPConnection():\n\t\t\t\tconnectionSuccess = True\n\t\t\t\tbreak\n\t\t\ttime.sleep(5)\n\n\t\tif not connectionSuccess:\n\t\t\tkillCoreNLP()\n\t\t\traise RuntimeError(\"Unable to connect to launched CoreNLP subprocess\")\n\n\t\ttime.sleep(1)", "def load_general_props(self):\n\n # load general and shared header.properties file from zipfile\n root = ForceArchive(self.file_path).read_properties('header.properties')\n shared = ForceArchive(self.file_path).read_properties('shared-data/header.properties')\n full = {}\n full.update(root)\n full.update(shared)\n return full", "def load_properties(self):\n self.get_overview_pages()\n # loop over page numbers to get list of property IDs\n for pageNum in np.nditer(self.pages):\n if pageNum <= self.maxPages:\n overviewURL = sOVERVIEW_URL + sPAGE_NUM_PREFIX + str(pageNum)\n print('Accessing: %s' % overviewURL)\n listings = self.get_listings_on_page(overviewURL)\n # now loop over all listings on page, and load url\n for i in range(len(listings)):\n print('viewing property: %s' % listings[i])\n url = self.assemble_link(listings[i])\n # set up property page and load data\n aProperty = PropertyPage(url)\n aProperty.load_website()\n aProperty.load_data(listings[i])\n self.data.add_data(aProperty.df)", "def __init__(self):\r\n\t\tself.label = \"Linked Data Location Entities Property Enrichment\"\r\n\t\tself.description = \"Get the most common properties from DBpedia according to input wikidata location entity IRI\"\r\n\t\tself.canRunInBackground = False\r\n\t\t# self.propertyURLList = []\r\n\t\t#propertyNameList = []\r\n\t\tLinkedDataPropertyEnrich.count += 1", "def onLoadConfig(self, inifile):\n cp = ConfigParser(self.defaults)\n cp.readfp(inifile)\n depth = self.getDepth(cp)\n self.baseurl = urljoin(self.inipath, depth)\n # create child loaders for any other l10n.ini files to be included\n try:\n for title, path in cp.items('includes'):\n 
# skip default items\n if title in self.defaults:\n continue\n # add child config parser\n self.addChild(title, path, cp)\n except NoSectionError:\n pass\n # try to load the \"dirs\" defined in the \"compare\" section\n try:\n self.dirs.extend(cp.get('compare', 'dirs').split())\n except (NoOptionError, NoSectionError):\n pass\n # try getting a top level compare dir, as used for fennec\n try:\n self.tld = cp.get('compare', 'tld')\n # remove tld from comparison dirs\n if self.tld in self.dirs:\n self.dirs.remove(self.tld)\n except (NoOptionError, NoSectionError):\n self.tld = None\n # try to set \"all_path\" and \"all_url\"\n try:\n self.all_path = cp.get('general', 'all')\n self.all_url = urljoin(self.baseurl, self.all_path)\n except (NoOptionError, NoSectionError):\n self.all_path = None\n self.all_url = None\n return cp", "def __load_cogs(self):\n for cog in self.__cogs.get():\n logging.info('loading %s', cog)\n self.load_extension(cog)", "def add_feat_conf(self, conf_map):\n conf_map['pcdi_trigger'] = str(self.pcdi_triggers.text()).replace('\\n', '')\n conf_map['partial_coherence_type'] = '\"' + str(self.pcdi_type.text()) + '\"'\n conf_map['partial_coherence_iteration_num'] = str(self.pcdi_iter.text())\n conf_map['partial_coherence_normalize'] = str(self.pcdi_normalize.text())\n conf_map['partial_coherence_roi'] = str(self.pcdi_roi.text()).replace('\\n', '')", "def load_conf(self):\n self._read_uconf()", "def onload(self):\n\t\tload_address_and_contact(self)", "def readProperties():\n separator = \":\"\n props = {}\n \n with open('upgrade.properties') as f:\n\n for line in f:\n if separator in line:\n\n # Find the name and value by splitting the string\n name, value = line.split(separator, 1)\n\n # Assign key value pair to dict\n # strip() removes white space from the ends of strings\n props[name.strip()] = value.strip()\n\n props['JDA_HOME'] = props['JDA_HOME'].replace('-', ':')\n globs.props = props\n\n globs.UserPassDict = {}\n for user_cat in globs.CRED_DICT:\n globs.UserPassDict[props[user_cat]] = props[globs.CRED_DICT[user_cat]]", "def corenlp_params(self):\n memory = self.config.get('corenlp_params', 'memory')\n timeout = self.config.getint('corenlp_params', 'timeout')\n return memory, timeout", "def getProperties():", "def initProxy(self,parent):\n\n params_file=str(parent)+\"/../statics/params.cfg\"\n print params_file\n param_dict=Loader.factory('NML').load(params_file)\n self.proxyserver=str(param_dict.get('proxy','proxy_adress'))\n self.proxyuser=str(param_dict.get('proxy','proxy_user'))\n self.proxypass=str(param_dict.get('proxy','proxy_pass'))\n self.cmemsuser=str(param_dict.get('cmems_server','user_cmems'))\n self.cmemspass=str(param_dict.get('cmems_server','pass_cmems'))", "def __initialize_nlp(self, nlp):\n nlp[\"nbQ\"] = 0\n nlp[\"nbQdot\"] = 0\n nlp[\"nbTau\"] = 0\n nlp[\"nbMuscles\"] = 0\n nlp[\"plot\"] = {}\n nlp[\"var_states\"] = {}\n nlp[\"var_controls\"] = {}\n nlp[\"CX\"] = self.CX\n nlp[\"x\"] = nlp[\"CX\"]()\n nlp[\"u\"] = nlp[\"CX\"]()\n nlp[\"J\"] = []\n nlp[\"g\"] = []\n nlp[\"g_bounds\"] = []\n nlp[\"casadi_func\"] = {}", "def load_common_words(self):\n with open(COMMON_WORDS_FILE_NAME, 'r') as f:\n self.common_words = self.nltk_text(f.read().decode('utf-8'))", "def load(self):\n \n with open(os.path.join(self.output_dir, 'terms.dict'), 'rb') as f:\n self.term_id_map = pkl.load(f)\n with open(os.path.join(self.output_dir, 'docs.dict'), 'rb') as f:\n self.doc_id_map = pkl.load(f)", "def parse_gc(gc):\n prop_gc = {}\n with open(gc, 'r') as f:\n for line in 
f:\n tmp = line.strip().split()\n prop_gc[tmp[0]] = tmp[1]\n return prop_gc" ]
[ "0.5345785", "0.5221034", "0.5063204", "0.50521326", "0.5028855", "0.49547", "0.4940425", "0.4928802", "0.48686203", "0.48553947", "0.48520488", "0.48199075", "0.48059475", "0.48050794", "0.4777459", "0.47767904", "0.47611597", "0.47114277", "0.47111797", "0.46975923", "0.46701762", "0.46415266", "0.46372586", "0.46196446", "0.458083", "0.45591325", "0.45536736", "0.45518228", "0.45387778", "0.45379624" ]
0.7217
0
Test single digit min base.
def test_single_digit_min_base(self):
    expected = 2
    digit = 1

    assert expected == min_base(digit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_digit_14_min_base(self):\n expected = 6\n digit = 14\n\n assert expected == min_base(digit)", "def test_digit_12_min_base(self):\n expected = 5\n digit = 12\n\n assert expected == min_base(digit)", "def min_digit(x):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n s = 10\n while(x>0):\n if(s>(x%10)):\n s = x%10\n x = x//10\n return s", "def checkBase(base, n):\n current = 1\n while current < n:\n current = current * base + 1\n return current == n", "def base_to_int(string, base):\n if string==\"0\" or base <= 0 : return 0 \n result = 0 \n return result", "def modifier(base):\n return int(math.floor((base - 10) / 2))", "def get_first_digit(x):\n x = int(x)\n if x < 0:\n return 0\n x = str(x)\n if len(x) == 1: # less than 10 ?\n return 0\n else:\n return int(x[0])", "def perfect_number(base):\n\tif type(base) is not int or base < 0:\n\t\treturn None\n\tbase = str(base)\n\tlt = [int(x) for x in base]\n\tif sum(lt) > 10:\n\t\treturn None\n\telse:\n\t\tlt.append(10-sum(lt))\n\tlt = [str(x) for x in lt]\n\treturn int(\"\".join(lt))", "def check_base(base, coin):\n value = coin_value_in_base(base, coin)\n if value % 2 == 0:\n return 2\n if value % 3 == 0:\n return 3\n div = 5\n while div * div < value:\n if value % div == 0:\n return div\n if value % (div + 2) == 0:\n return div + 2\n div += 6\n return False", "def Happy(n, b):\r\n n = ToBase(b, n)\r\n seen = set()\r\n while n not in seen:\r\n seen.add(n) \r\n v = 0\r\n while n:\r\n d = n % 10\r\n n = n // 10\r\n v += d * d\r\n n = ToBase(b, v) \r\n if n == 1:\r\n return True\r\n return False", "def intable(int_str, base=10):\n try:\n int(int_str, base)\n return True\n except:\n return False", "def has_small_digits(n,maxdigit):\n digits = [int(num) for num in str(n)]\n return all([num <= maxdigit for num in digits])", "def choose_bin_base() -> int:\n return npr.choice((2, 8, 16))", "def exceeds_min(value, min_):\n\n if isinstance(value, (float, int)):\n val_ = value\n else:\n try:\n val_ = int(value)\n except:\n val_ = value\n if isinstance(min_, (float, int)):\n return (val_ < min_)\n else:\n if min_.isalnum():\n try:\n imin = int(min_)\n return (val_ < imin)\n except:\n pass\n \n return False", "def test_smallest_number_with_last_digit_equal_to_an_input_digit():\n assert smallest_number_with_last_digit_equal_to_an_input_digit([1, 6, 34, 68, 40, 48, 20], 8) == 48\n assert smallest_number_with_last_digit_equal_to_an_input_digit([1, 2, 3], 3) == 3\n assert smallest_number_with_last_digit_equal_to_an_input_digit([101, 1001, 100001], 1) == 101", "def check_digit(raw_code):\n s = sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10", "def int_to_base(num, base):\n if num<=0: return '0' \n digits = []\n return ''.join(digits)", "def get_prime_digits_for_one(a: int) -> bool:\r\n b = a\r\n c = 0\r\n c1 = 0\r\n while b > 0:\r\n c1 += 1\r\n n = b % 10\r\n if isprime(n):\r\n c += 1\r\n b = b // 10\r\n if c == c1:\r\n return True\r\n else:\r\n return False", "def PFirstDigit(d):\r\n return math.log(1.0+ 1.0/float(d)) / math.log(10)", "def int2base(x, base):\n digs = string.digits + string.ascii_lowercase\n if x < 0:\n sign = -1\n elif x == 0:\n return '0'\n else:\n sign = 1\n x *= sign\n digits = []\n while x:\n digits.append(digs[x % base])\n x //= base\n if sign < 0:\n digits.append('-')\n digits.reverse()\n return ''.join(digits)", "def get_base(ciphertext):\n # 10+6\n b16 = string.digits + 'ABCDEF'\n # 26+6+1\n b32 = string.ascii_uppercase + '234567='\n # 10+26*2+2+1\n b64 = string.digits + string.letters 
+ '+/='\n\n bdict = {'64': b64, '32': b32, '16': b16}\n all_char = set(ciphertext)\n\n for key in sorted(bdict.keys()):\n if all_char.issubset(bdict[key]):\n return key", "def test_min_pressure_value(self):\n self.assertEqual(\"%0.7f\" % self.PminValue, str(0.0101325))", "def base_repr(i, base):\n\n assert i>=0 and base>=2\n \n if i==0:\n return ['0']\n\n if base<=10:\n return _small_base(i, base)\n\n assert base<=36\n return _large_base(i, base)", "def base(x):\r\n return x(0)", "def solution(max_base: int = 5) -> int:\n freqs = defaultdict(list)\n num = 0\n\n while True:\n digits = get_digits(num)\n freqs[digits].append(num)\n\n if len(freqs[digits]) == max_base:\n base = freqs[digits][0] ** 3\n return base\n\n num += 1", "def test_check_all_default_bases_positional(self, number, base):\n converted = positional.encode(number, base)\n self.assertEqual(positional.decode(converted, base), number)", "def main(destination_base, max_number, decimal_number):\n if 2 <= destination_base <= 9:\n if 0 <= decimal_number <= max_number:\n converted_number = base_conversion(destination_base, decimal_number)\n print(f\"the converted number is: {converted_number}\")\n else:\n print(\"invalid input for base 10 number\")\n else:\n print(\"invalid input for destination base\")", "def baseN(num, b, numerals=\"0123456789abcdefghijklmnopqrstuvwxyz\"):\n neg = num < 0\n num = abs(num)\n val = ((num == 0) and numerals[0]) or (baseN(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])\n return '-' + val if neg else val", "def MINIMUM_BET() -> int:\n return 10", "def to_base_10(number, base):\n\n number_in_base_10 = 0\n\n exp = len(number) - 1\n for digit in number:\n number_in_base_10 += get_number_from_character(digit) * pow(base, exp)\n exp -= 1\n\n return number_in_base_10" ]
[ "0.8108461", "0.79613477", "0.6992336", "0.6729573", "0.631448", "0.6166285", "0.60119784", "0.59899604", "0.5962518", "0.59565884", "0.5881729", "0.5834557", "0.5809722", "0.5784438", "0.57559973", "0.5659558", "0.5654602", "0.5641183", "0.55670154", "0.55575097", "0.5541654", "0.55263627", "0.551486", "0.55122435", "0.5474957", "0.54746103", "0.54744256", "0.54329437", "0.5419766", "0.5415026" ]
0.87544614
0
Test digit 12 min base.
def test_digit_12_min_base(self):
    expected = 5
    digit = 12

    assert expected == min_base(digit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_digit_14_min_base(self):\n expected = 6\n digit = 14\n\n assert expected == min_base(digit)", "def test_single_digit_min_base(self):\n expected = 2\n digit = 1\n\n assert expected == min_base(digit)", "def modifier(base):\n return int(math.floor((base - 10) / 2))", "def check(ht, mt, st, pid):\n\n ns_ticks = 0\n shift = 0\n\n diff = (mt - ht + TOTAL_TICKS) % TOTAL_TICKS\n for rep in range(12):\n tmp = diff + rep * TOTAL_TICKS\n if tmp % 11 == 0:\n ns_ticks = tmp / 11\n shift = (ht - ns_ticks + TOTAL_TICKS) % TOTAL_TICKS\n\n if (ns_ticks + shift) % TOTAL_TICKS != ht:\n continue\n\n if (12*ns_ticks + shift) % TOTAL_TICKS != mt:\n continue\n\n if (720*ns_ticks + shift) % TOTAL_TICKS != st:\n continue\n\n # calc_st = (720*ns_ticks + shift) % TOTAL_TICKS\n # if calc_st == st:\n ns = ns_ticks % 1e9\n ns_ticks /= 1e9\n\n secs = ns_ticks % 60\n ns_ticks /= 60\n\n mins = ns_ticks % 60\n ns_ticks /= 60\n\n hrs = ns_ticks\n\n if hrs < 12:\n print(f\"Case #{pid}: {int(hrs)} {int(mins)} {int(secs)} {int(ns)}\")\n return True\n\n return False", "def min_digit(x):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n s = 10\n while(x>0):\n if(s>(x%10)):\n s = x%10\n x = x//10\n return s", "def check_digit(tracking_number):\n check_digit = 10 - ((sum(itertools.starmap(operator.mul, zip(itertools.cycle((3, 1)), map(int, str(tracking_number))))) + 1) % 10)\n if check_digit == 10:\n check_digit = 0\n return check_digit", "def get_minute(self):\n\n # First we get the first 8 bits stored in the minute register\n # and translate it to an integer\n minute_bcd = self.__read_register(_REGISTER_MINUTE)\n\n # We separate the tens from the digits\n\n tens = (minute_bcd & 0x70) >> 4 # 0x70 = 0b01110000\n digit = (minute_bcd & 0x0F) # 0x0F = 0b00001111\n\n return 10 * (tens) + digit", "def check_time(self, m, s):\r\n if m*60 + s > 5400:\r\n self.unit.s = 0\r\n self.unit.m = 90\r\n return\r\n if s < 0:\r\n s = 0\r\n if m < 0:\r\n m = 0\r\n self.unit.s = s\r\n self.unit.m = m", "def checkdigit(code):\n check = sum((i+1)*int(code[i]) for i in range(9)) % 11\n return 'X' if check == 10 else str(check)", "def isbn_13_check_digit(twelve_digits):\r\n if len(twelve_digits) != 12: return None\r\n try: int(twelve_digits)\r\n except: return None\r\n thirteenth_digit = 10 - int(sum((i % 2 * 2 + 1) * int(x) for i, x in enumerate(twelve_digits)) % 10)\r\n if thirteenth_digit == 10: thirteenth_digit = '0'\r\n return str(thirteenth_digit)", "def check_digits_cpf(x: str, n: int) -> int:\n check_vec = np.flip(np.arange(2, 10 + n))\n digits = np.array(list(x[: 8 + n])).astype(\"int\")\n result = np.dot(check_vec, digits) % 11\n\n return 0 if result < 2 else 11 - result", "def check_mountain_number(n):\n def helper(x, is_incresing):\n if x // 10 == 0:\n return True\n if is_incresing and (x % 10) < ((x // 10) % 10):\n return helper(x // 10, is_incresing)\n return (x % 10) > ((x // 10) % 10) and helper(x // 10, False)\n return helper(n, True)", "def time_to_slot(hh,mm):\r\n hour_slot = hh*4\r\n minute_slot = 0\r\n if mm <= 14:\r\n minute_slot = 1\r\n elif mm <= 29 and mm >=15:\r\n minute_slot = 2\r\n elif mm <= 44 and mm >=30:\r\n minute_slot = 3\r\n elif mm >= 45 and mm <=60:\r\n minute_slot = 4\r\n else:\r\n print(\"Minute value incorrect, please check it\")\r\n\r\n timeslot = hour_slot + minute_slot\r\n return timeslot", "def convert_24hr_12ampm(military_hr): \n\t\n if military_hr == 0:\n hour_ampm_str = \"12am\"\n elif military_hr == 12:\n hour_ampm_str = \"12pm\"\n elif military_hr > 12:\n hour_ampm_str = str(military_hr - 12) 
+ \"pm\"\n else:\n hour_ampm_str = str(military_hr) + \"am\"\n # end of if block\n \n return hour_ampm_str", "def check_digit(raw_code):\n s = sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10", "def is_valid_month_number(month_number: int) -> bool:\n if 0 < month_number <= 12:\n return True\n else:\n return False", "def day_04_b() -> int:\n return get_min_hash(\"bgvyzdsv\", \"000000\")", "def checkBase(base, n):\n current = 1\n while current < n:\n current = current * base + 1\n return current == n", "def is_valid_month (val):\n if len(val) == 2 and count_digits(val) == 2:\n month = int(val)\n return month > 0 and month < 13\n return False", "def itofm(i):\n return 2 ** (i / 12.0)", "def test_mode_digit():\n print('Testing mode_digit')\n\n # Cases given to test this problem\n assert_equals(1, hw1.mode_digit(12121))\n assert_equals(0, hw1.mode_digit(0))\n assert_equals(2, hw1.mode_digit(-122))\n assert_equals(2, hw1.mode_digit(1211232231))\n\n # Additional cases to test numbers with same digit occurance numbers\n assert_equals(3, hw1.mode_digit(-333000221))\n assert_equals(4, hw1.mode_digit(440011))", "def day_04_a() -> int:\n return get_min_hash(\"bgvyzdsv\", \"00000\")", "def test_integral_time(self):\n test_time = random.randint(0,24)\n test_string = \"AlanTimeZT{0}\".format(test_time)\n self.assertEquals(self.parser.extract_zt(test_string), test_time)", "def start(st_reg_number):\n #st_reg_number = str(st_reg_number)\n number_state_registration_first_digit = st_reg_number[0:3] + '0' + st_reg_number[3: len(st_reg_number)-2]\n weights_first_digit = [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]\n wights_second_digit = [3, 2, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2]\n first_digit = st_reg_number[-2]\n second_digit = st_reg_number[-1]\n sum_first_digit = 0\n sum_second_digit = 0\n sum_result_digit = ''\n sum_end = 0\n\n if len(st_reg_number) != 13:\n\n return False\n\n for i in range(0, 12):\n\n sum_first_digit = weights_first_digit[i] * int(number_state_registration_first_digit[i])\n\n sum_result_digit = sum_result_digit + str(sum_first_digit)\n\n for i in range(0, len(sum_result_digit)):\n\n sum_end = sum_end + int(sum_result_digit[i])\n\n if sum_end % 10 == 0:\n\n check_digit_one = 0\n\n elif sum_end < 10:\n\n check_digit_one = 10 - sum_end\n\n elif sum_end > 10:\n\n check_digit_one = (10 - sum_end % 10)\n\n if str(check_digit_one) != first_digit:\n\n return False\n\n number_state_registration_second_digit = st_reg_number + str(check_digit_one)\n\n for i in range(0, 12):\n\n sum_second_digit = sum_second_digit + wights_second_digit[i] * int(number_state_registration_second_digit[i])\n\n check_second_digit = 11 - sum_second_digit % 11\n\n if sum_second_digit == 1 or sum_second_digit == 0:\n\n return second_digit == '0'\n\n else:\n return str(check_second_digit) == second_digit", "def calc_check_digit(number):\n weights = (2, 4, 8, 5, 10, 9, 7, 3, 6)\n return str(sum(w * int(n) for w, n in zip(weights, number)) % 11 % 10)", "def h_12(self):\n return self._pmhour", "def test_hackerrank_sample1(self):\n result = find_digits(12)\n self.assertEquals(result, 2)", "def task11_time_converter(num):\n if num < 0:\n raise ValueError\n hour = num // 60\n minute = num % 60\n return f'{hour}:{minute}'", "def ms_from_bpm(bpm: float) -> float:\n return 240000 / bpm", "def isbn_10_check_digit(nine_digits):\r\n if len(nine_digits) != 9: return None\r\n try: int(nine_digits)\r\n except: return None\r\n remainder = int(sum((i + 2) * int(x) for i, x in enumerate(reversed(nine_digits))) % 
11)\r\n if remainder == 0: tenth_digit = 0\r\n else: tenth_digit = 11 - remainder\r\n if tenth_digit == 10: tenth_digit = 'X'\r\n return str(tenth_digit)" ]
[ "0.68628234", "0.6560506", "0.5813826", "0.5736882", "0.570259", "0.5691976", "0.55865026", "0.5557803", "0.5555295", "0.54997677", "0.5392188", "0.5373415", "0.5369537", "0.5350108", "0.5335272", "0.53054", "0.5272494", "0.52332306", "0.52300835", "0.5220331", "0.5219849", "0.52194065", "0.5216872", "0.5213075", "0.519623", "0.51923394", "0.51860714", "0.51430565", "0.51427794", "0.51317203" ]
0.7900096
0
Test digit 14 min base.
def test_digit_14_min_base(self):
    expected = 6
    digit = 14

    assert expected == min_base(digit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_digit_12_min_base(self):\n expected = 5\n digit = 12\n\n assert expected == min_base(digit)", "def test_single_digit_min_base(self):\n expected = 2\n digit = 1\n\n assert expected == min_base(digit)", "def min_digit(x):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n s = 10\n while(x>0):\n if(s>(x%10)):\n s = x%10\n x = x//10\n return s", "def modifier(base):\n return int(math.floor((base - 10) / 2))", "def min_parts():\n # you must replace this with your own value\n return 1155", "def min_parts():\n # you must replace this with your own value\n return 1155", "def checkBase(base, n):\n current = 1\n while current < n:\n current = current * base + 1\n return current == n", "def fix_teen(n):\n if 13<=n<=14 or 17<=n<=19:\n return 0\n else:\n return n", "def check_digits_cpf(x: str, n: int) -> int:\n check_vec = np.flip(np.arange(2, 10 + n))\n digits = np.array(list(x[: 8 + n])).astype(\"int\")\n result = np.dot(check_vec, digits) % 11\n\n return 0 if result < 2 else 11 - result", "def day_04_b() -> int:\n return get_min_hash(\"bgvyzdsv\", \"000000\")", "def choose_bin_base() -> int:\n return npr.choice((2, 8, 16))", "def min_temp(self):\n return 16", "def check_digit(tracking_number):\n check_digit = 10 - ((sum(itertools.starmap(operator.mul, zip(itertools.cycle((3, 1)), map(int, str(tracking_number))))) + 1) % 10)\n if check_digit == 10:\n check_digit = 0\n return check_digit", "def checkdigit(code):\n check = sum((i+1)*int(code[i]) for i in range(9)) % 11\n return 'X' if check == 10 else str(check)", "def day_04_a() -> int:\n return get_min_hash(\"bgvyzdsv\", \"00000\")", "def base_pick():\n\n rnd = generate_random(2, 15)\n return rnd", "def check_random_bc(seq):\n if seq.startswith('TGATC'):\n return seq[5:]\n else:\n return seq[:16]", "def solution(max_base: int = 5) -> int:\n freqs = defaultdict(list)\n num = 0\n\n while True:\n digits = get_digits(num)\n freqs[digits].append(num)\n\n if len(freqs[digits]) == max_base:\n base = freqs[digits][0] ** 3\n return base\n\n num += 1", "def isbn_10_check_digit(nine_digits):\r\n if len(nine_digits) != 9: return None\r\n try: int(nine_digits)\r\n except: return None\r\n remainder = int(sum((i + 2) * int(x) for i, x in enumerate(reversed(nine_digits))) % 11)\r\n if remainder == 0: tenth_digit = 0\r\n else: tenth_digit = 11 - remainder\r\n if tenth_digit == 10: tenth_digit = 'X'\r\n return str(tenth_digit)", "def rand10():\n res = 40\n while res >= 40:\n res = 7 * (rand7() - 1) + (rand7() - 1)\n return res % 10 + 1", "def is_acceptable_multiplier(m):\n return 1 < m < (2 ** 61 - 1)", "def main(destination_base, max_number, decimal_number):\n if 2 <= destination_base <= 9:\n if 0 <= decimal_number <= max_number:\n converted_number = base_conversion(destination_base, decimal_number)\n print(f\"the converted number is: {converted_number}\")\n else:\n print(\"invalid input for base 10 number\")\n else:\n print(\"invalid input for destination base\")", "def base_to_int(string, base):\n if string==\"0\" or base <= 0 : return 0 \n result = 0 \n return result", "def check_digit(raw_code):\n s = sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10", "def getrandbits(k: int) -> int:\n ...", "def test_hackerrank_sample1(self):\n result = find_digits(12)\n self.assertEquals(result, 2)", "def to_base_ten(self, value, base):\r\n numeral = self.numeral\r\n\r\n if not 2 <= base <= 36:\r\n raise ValueError('Base must be between 2 and 36')\r\n \r\n x = str(value)\r\n if '.' 
in x: #If value has a fractional part\r\n int_result = 0\r\n frac_result = 0\r\n int_part, frac_part = x.split('.')[0], x.split('.')[1] #split the value at '.' to two parts and return tuple\r\n int_result += int(int_part, base) #performing addition to confirm result is an integer\r\n \r\n for i in range(1, len(frac_part)+ 1): #exponent for fractional part starts from -1 to -n\r\n try:\r\n frac_result += (int(frac_part[i-1]) / pow(base, i)) #If no alphabeth in fractional part\r\n except Exception:\r\n frac_result += (int(numeral.index(frac_part[i-1])) / pow(base, i)) #Else look up value in numeral\r\n \r\n return int_result + frac_result #The retured value is an integer\r\n\r\n else: #If value is a whole number\r\n return int(str(value), base)", "def test_short_prices(self):\n date = datetime(2016, 11, 15)\n prices = [2] * 2\n expected_sequence = 22\n expected_week = 46\n self.assertEqual(\n star_barcode.date_to_sequence_and_week(\n date=date, price_codes=prices),\n (expected_sequence, expected_week)\n )", "def get_rand_senary(ndigits, base=0):\n # Algorithm from https://stackoverflow.com/questions/137783/expand-a-random-range-from-1-5-to-1-7/891304#891304\n senary_digits = []\n state = 0\n pow1 = 1\n pow2 = 6\n while len(senary_digits) < ndigits:\n if state // pow1 == (state + pow2) // pow1:\n result = state // pow1\n state = (state - result * pow1) * 6\n pow2 *= 6\n senary_digits.append(result+base)\n else:\n state = 256 * state + pow2 * ord(os.urandom(1))\n pow1 *= 256\n # Keep the size of the huge numbers under a googol so it doesn't slow to a crawl.\n if pow1 > 10e100 or pow2 > 10e100:\n pow1 = 1\n pow2 = 6\n state = 0\n return ''.join(map(str, senary_digits))", "def start(st_reg_number):\n #st_reg_number = str(st_reg_number)\n number_state_registration_first_digit = st_reg_number[0:3] + '0' + st_reg_number[3: len(st_reg_number)-2]\n weights_first_digit = [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]\n wights_second_digit = [3, 2, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2]\n first_digit = st_reg_number[-2]\n second_digit = st_reg_number[-1]\n sum_first_digit = 0\n sum_second_digit = 0\n sum_result_digit = ''\n sum_end = 0\n\n if len(st_reg_number) != 13:\n\n return False\n\n for i in range(0, 12):\n\n sum_first_digit = weights_first_digit[i] * int(number_state_registration_first_digit[i])\n\n sum_result_digit = sum_result_digit + str(sum_first_digit)\n\n for i in range(0, len(sum_result_digit)):\n\n sum_end = sum_end + int(sum_result_digit[i])\n\n if sum_end % 10 == 0:\n\n check_digit_one = 0\n\n elif sum_end < 10:\n\n check_digit_one = 10 - sum_end\n\n elif sum_end > 10:\n\n check_digit_one = (10 - sum_end % 10)\n\n if str(check_digit_one) != first_digit:\n\n return False\n\n number_state_registration_second_digit = st_reg_number + str(check_digit_one)\n\n for i in range(0, 12):\n\n sum_second_digit = sum_second_digit + wights_second_digit[i] * int(number_state_registration_second_digit[i])\n\n check_second_digit = 11 - sum_second_digit % 11\n\n if sum_second_digit == 1 or sum_second_digit == 0:\n\n return second_digit == '0'\n\n else:\n return str(check_second_digit) == second_digit" ]
[ "0.73744816", "0.67325693", "0.5823343", "0.5726001", "0.5668755", "0.5668755", "0.5592649", "0.55830795", "0.5475364", "0.54414123", "0.5374844", "0.5290579", "0.5288535", "0.52868295", "0.5286815", "0.5249504", "0.5242419", "0.5217242", "0.52117115", "0.5208738", "0.51945275", "0.51819307", "0.516338", "0.51555043", "0.5144843", "0.5140783", "0.5132707", "0.51207006", "0.5120504", "0.5116788" ]
0.8115787
0
Set up function that creates a test client, with a new user and a regular user that will be listed in the admin page.
def setUp(self):
    # Create client
    self.client = Client()
    # Create the admin user
    self.admin_user = get_user_model().objects.create_superuser(
        email='[email protected]',
        password='adminTesting123'
    )
    # Login the admin user
    self.client.force_login(self.admin_user)
    # Create the reqular user
    self.user = get_user_model().objects.create_user(
        email='[email protected]',
        password='userTesting123',
        name='Test user full name'
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n self.client = Client()\n #creamos un usuario en la db\n self.user = User.objects.create_user('super', '[email protected]', 'super')", "def setup_test_user(self):\n self.setup_test_tenant()\n self.test_user = rand_name('test_user_')\n self.test_password = rand_name('pass_')\n self.test_email = self.test_user + '@testmail.tm'\n resp, self.user = self.client.create_user(self.test_user,\n self.test_password,\n self.tenant['id'],\n self.test_email)\n self.users.append(self.user)", "def setUp(self):\n self.client = Client()\n self.user = User.objects.create_user('testuser', '[email protected]', 'q2w3E$R%')", "def test_create_client(self):\n pass", "def setUp(self):\n #creamos un cliente http\n self.client = Client()\n #creamos un usuario en la base de datos de prueba\n self.user = User.objects.create_user('super', '[email protected]', 'super')", "def test_client_create(self):\n pass", "def setUp(self):\r\n super(SysadminBaseTestCase, self).setUp()\r\n self.user = UserFactory.create(username='test_user',\r\n email='[email protected]',\r\n password='foo')\r\n self.client = Client()", "def setUp(self):\n self.app = app.test_client()\n self.new_user_login = {\n 'username': 'daniel',\n 'password': '[email protected]'\n }\n self.new_user_info = {\n 'username': 'daniel',\n 'fullname': 'daniel jambo',\n 'email': '[email protected]',\n 'password': '[email protected]'\n }", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def setUp(self):\r\n\r\n # Get the Flask test client\r\n self.client = app.test_client()\r\n app.config['TESTING'] = True\r\n\r\n # Connect to test database\r\n connect_to_db(app, \"postgresql:///test_db\")\r\n\r\n # Create tables and add sample data\r\n db.create_all()\r\n \r\n self.user = crud.create_user(email='[email protected]', password = 'K9#n*Hs73', fname = 'Mary', lname = 'Crews', job = 'Night Auditor',\r\n current_location = 'Florida', place_of_birth = 'Iowa', dob ='1977-11-03', isAdmin =False)", "def test_create(self):\n urls = [reverse('api:user-list')]\n data = {\n \"username\": \"newuser\",\n \"email\": \"[email protected]\",\n \"password\": \"password\"\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_1_client],\n \"allowed\": [self.admin_client]\n }\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.post(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.post(url, data, format='json').status_code,\n status.HTTP_201_CREATED\n )", "def setUp(self):\n self.user = {\n \"Email\": \"[email protected]\",\n \"Password\": \"pass1234\",\n \"Confirm Password\": \"pass1234\"\n }\n self.app = create_app('testing')\n self.client = self.app.test_client", "def setUp(self):\n self.new_user = User(\"Juma\",\"12345\")", "def setUp(self):\n self.new_user = User(username='burens', password='12345')", "def setUp(self):\n self.new_user = User(\"Hamisi\",\"python\")", "def setUp(self):\n self.new_user = User('JosphatOtieno','jose@otis45')", "def setUp(self):\n\n self.user = self.client.users.create({})", "def setUp(self):\n self.url = reverse('profile')\n\n self.first_user = CustomUser.objects.create(\n id=1001,\n email='[email protected]',\n phone_number='+380111111111',\n is_active=True\n )\n self.first_user.set_password('1111Bb')\n self.first_user.save()\n\n self.user_profile = 
UserProfile.objects.create(\n id=1111,\n user=self.first_user,\n first_name='Jhon',\n last_name='Doe'\n )\n\n self.second_user = CustomUser.objects.create(\n id=2002,\n email='[email protected]',\n phone_number='+380111111112',\n )\n self.second_user.set_password('2222Bb')\n self.second_user.save()\n\n self.client = Client()\n self.client.login(email='[email protected]', password='1111Bb')\n\n self.second_client = Client()\n self.second_client.login(email='[email protected]', password='2222Bb')", "def setUp(self):\n\n self.new_user = User(\"Danlon\", \"Situma\", \"Dasi202\", \"passcode\")", "def test_create_user(self):\n #open the django admin page.\n self.selenium.get(\n '%s%s' % (self.live_server_url, \"/admin\")\n )\n\n #fill in login information of admin\n username = self.selenium.find_element_by_id(\"id_username\")\n username.send_keys(\"admin\")\n password = self.selenium.find_element_by_id(\"id_password\")\n password.send_keys(\"admin\")\n\n #locate login button and click it.\n self.selenium.find_element_by_xpath('//input[@value=\"Inloggen\"]').click()\n self.selenium.get(\n '%s%s' % (self.live_server_url, \"/admin/auth/user/add/\")\n )\n\n # Fill the create user form with username and password\n self.selenium.find_element_by_id(\"id_username\").send_keys(\"test\")\n self.selenium.find_element_by_id(\"id_password1\").send_keys(\"test1234\")\n self.selenium.find_element_by_id(\"id_password2\").send_keys(\"test1234\")\n\n # Forms can be submitted directly by calling its method submit\n self.selenium.find_element_by_id(\"user_form\").submit()\n self.assertIn(\"Change user\", self.selenium.title)", "def test_admin_create_user(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Summer Love has been registered')\n self.assertEqual(resp.status_code, 201)", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.app_context = self.app.app_context()\n self.app_context.push()\n self.client = self.app.test_client()\n\n\n self.user = {\n\t \"firstname\": \"Michael\",\n\t \"lastname\": \"Mbugua\",\n \"othername\": \"Mike\",\n \"email\": \"[email protected]\",\n \"phoneNumber\": \"0708453901\",\n \"username\": \"Thomas\",\n \"password\": \"Aw3someSauce\"\n \n }", "def setUp(self):\n self.client = Client()\n self.test_user = User.objects.create_user('testuser', '[email protected]', 'testpassword')\n self.test_user.is_superuser = True\n self.test_user.is_active = True\n self.test_user.save()\n self.assertEqual(self.test_user.is_superuser, True)\n login = self.client.login(username='testuser', password='testpassword')\n self.failUnless(login, 'Could not log in')", "def setUp(self):\n self.client = Client()\n self.test_user = User.objects.create_user('testuser', '[email protected]', 'testpassword')\n self.test_user.is_superuser = True\n self.test_user.is_active = True\n self.test_user.save()\n self.assertEqual(self.test_user.is_superuser, True)\n login = self.client.login(username='testuser', password='testpassword')\n self.failUnless(login, 'Could not log in')", "def setUp(self):\n \n self.client = Client()\n self.test_user = User.objects.create_user('testuser', '[email protected]', 
'testpassword')\n self.test_user.is_superuser = True\n self.test_user.is_active = True\n self.test_user.save()\n self.assertEqual(self.test_user.is_superuser, True)\n login = self.client.login(username='testuser', password='testpassword')\n self.failUnless(login, 'Could not log in')", "def setUp(self):\n user = User.objects.create(username=\"nerd\")\n self.client = APIClient()\n self.client.force_authenticate(user=user)\n self.post_data = {'name':'Go to Ibiza', 'owner': user.id}\n self.response = self.client.post(\n reverse('create'),\n self.post_data,\n format=\"json\")", "def setUp(self):\n # create our test user\n self.test_user1 = get_user_model().objects.create(**USER1_PARAMS)\n self.test_user2 = get_user_model().objects.create(**USER2_PARAMS)\n self.rogue_user = get_user_model().objects.create(**ROGUE_USER_PARAMS)\n self.test_admin = get_user_model().objects.create(**ADMIN_USER_PARAMS)\n site = Site.objects.get_current()\n self.test_blog = Blog.objects.create(site=site, owner=self.test_user1,\n **TEST_BLOG_PARAMS)\n self.test_category1 = Category.objects.create(\n blog=self.test_blog,\n **CAT1_PARAMS\n )\n self.client = Client()\n # self.post = Post.objects.create(\n # title=\"Test User 1 Post\",\n # body=\"This is some stuff.\\n\\nSome stuff, you know.\",\n # blog=self.test_blog,\n # author=self.test_user1.author\n # )\n # self.post.save()\n # enable remote access for test_user1\n self.test_user1.author.remote_access_enabled = True\n self.test_user1.author.save()\n\n # disable remote access for test_user2\n self.test_user2.author.remote_access_enabled = False\n self.test_user2.author.save()\n\n self.rogue_user.author.remote_access_enabled = True\n self.rogue_user.author.save()\n\n self.test_admin.author.remote_access_enabled = True\n self.test_admin.author.save()", "def setUp(self):\n self.new_user = User.objects.create_user(first_name='John', last_name='Doe', username='john_doe', email='[email protected]', bio='I am new here.', password='test_password', website='example.com', social_media={\n 'facebook':'Facebook link',\n 'Dribble': 'Dribble link',\n })" ]
[ "0.7668336", "0.7579043", "0.75625336", "0.7551012", "0.7518695", "0.7506115", "0.74307555", "0.73853904", "0.7229625", "0.7229625", "0.7229625", "0.7219692", "0.72146237", "0.72099113", "0.7081468", "0.7060547", "0.70462763", "0.704133", "0.7039718", "0.6998332", "0.6997446", "0.6996139", "0.69772446", "0.6967035", "0.6962337", "0.6962337", "0.6950535", "0.6943278", "0.6937568", "0.6935635" ]
0.794958
0
Write RINEX navigation file for 3 days (rundate +/- 1 day)
def rinex2_nav(dset):
    for date_offset in range(-1, 2):
        write_one_day(dset, dset.analysis["rundate"] + timedelta(days=date_offset))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_one_day(dset, date):\n brdc = apriori.get(\n \"orbit\",\n rundate=dset.analysis[\"rundate\"],\n system=tuple(dset.unique(\"system\")),\n station=dset.vars[\"station\"],\n apriori_orbit=\"broadcast\",\n )\n\n meta = brdc.dset_edit.meta[date.strftime(\"%Y-%m-%d\")]\n data = brdc.dset_edit # TODO: Another possibility: brdc.dset_raw\n file_vars = {**dset.vars, **dset.analysis}\n file_vars[\"doy\"] = config.date_vars(date)[\n \"doy\"\n ] # TODO: workaround, so that all files are written in the same working directory -> does not work if year is changed.\n\n with config.files.open(\"output_rinex2_nav\", file_vars=file_vars, mode=\"wt\") as fid:\n\n #\n # Write RINEX navigation header\n #\n if meta[\"file_type\"] == \"N\":\n file_type = \"NAVIGATION DATA\"\n\n fid.write(\"{:>9s}{:11s}{:40s}RINEX VERSION / TYPE\\n\".format(meta[\"version\"], \"\", file_type))\n fid.write(\n \"{:20s}{:20s}{:20s}PGM / RUN BY / DATE\\n\".format(meta[\"program\"], meta[\"run_by\"], meta[\"file_created\"])\n )\n\n for line in meta[\"comment\"]:\n fid.write(\"{:60s}COMMENT\\n\".format(line))\n fid.write(\n \"{:>14.4e}{:>12.4e}{:>12.4e}{:>12.4e}{:10s}ION ALPHA\\n\"\n \"\".format(\n meta[\"iono_para\"][\"GPSA\"][\"para\"][0],\n meta[\"iono_para\"][\"GPSA\"][\"para\"][1],\n meta[\"iono_para\"][\"GPSA\"][\"para\"][2],\n meta[\"iono_para\"][\"GPSA\"][\"para\"][3],\n \"\",\n )\n )\n fid.write(\n \"{:>14.4e}{:>12.4e}{:>12.4e}{:>12.4e}{:10s}ION BETA\\n\"\n \"\".format(\n meta[\"iono_para\"][\"GPSB\"][\"para\"][0],\n meta[\"iono_para\"][\"GPSB\"][\"para\"][1],\n meta[\"iono_para\"][\"GPSB\"][\"para\"][2],\n meta[\"iono_para\"][\"GPSB\"][\"para\"][3],\n \"\",\n )\n )\n # TODO fid.write('{:>22.12e}{:>19.12e}{:>9d}{:>9d}{:1s}DELTA-UTC: A0,A1,T,W\\n'\n # ''.format(meta['a0'], meta['a1'], int(meta['t']), int(meta['w']), ''))\n fid.write(\"{:>6d}{:54s}LEAP SECONDS\\n\".format(int(meta[\"leap_seconds\"][\"leap_seconds\"]), \"\"))\n fid.write(\"{:60s}END OF HEADER\\n\".format(\"\"))\n\n #\n # Write RINEX navigation data\n #\n # TODO:\n # for drow in data.get_rows():\n # fid.write('{d.sat:2d} {d.time:%Y%m%d %H%M%S} {d.inc0:13.4f}'.format(d=drow))\n # fid.write(' {d.hurra:14.10f} ...'.format(d=drow))\n\n for idx in range(0, data.num_obs):\n sat = int(data.satellite[idx][1:3])\n d = data.time.gps.datetime[idx]\n fid.write(\n \"{:2d}{:>3s}{:>3d}{:>3d}{:>3d}{:>3d}{:>5.1f}{:>19.12e}{:>19.12e}{:>19.12e}\\n\"\n \"\".format(\n sat,\n str(d.year)[2:4],\n d.month,\n d.day,\n d.hour,\n d.minute,\n d.second,\n data.sat_clock_bias[idx],\n data.sat_clock_drift[idx],\n data.sat_clock_drift_rate[idx],\n )\n )\n fid.write(\n \"{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\\n\"\n \"\".format(data.iode[idx], data.crs[idx], data.delta_n[idx], data.m0[idx])\n )\n fid.write(\n \"{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\\n\"\n \"\".format(data.cuc[idx], data.e[idx], data.cus[idx], data.sqrt_a[idx])\n )\n # TODO: toe depends on GNSS system time -> for BeiDou it has to be changed\n fid.write(\n \"{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\\n\"\n \"\".format(data.toe.gps.gpssec[idx], data.cic[idx], data.Omega[idx], data.cis[idx])\n )\n fid.write(\n \"{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\\n\"\n \"\".format(data.i0[idx], data.crc[idx], data.omega[idx], data.Omega_dot[idx])\n )\n # TODO: gnss_week depends on GNSS -> for BeiDou it has to be changed\n # TODO: codes_l2 only valid for GPS and QZSS -> Galileo data_source; rest None\n # TODO: 'G': 'l2p_flag', 'J': 'l2p_flag'\n fid.write(\n \"{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\\n\"\n \"\".format(data.idot[idx], 
data.codes_l2[idx], data.gnss_week[idx], data.l2p_flag[idx])\n )\n # TODO: 'G': 'iodc', 'J': 'iodc', 'E': 'bgd_e1_e5b', 'C': 'tgd_b2_b3'\n # TODO: 'G': 'tgd', 'J': 'tgd', 'E': 'bgd_e1_e5a', 'C': 'tgd_b1_b3', 'I': 'tgd'\n fid.write(\n \"{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\\n\"\n \"\".format(data.sv_accuracy[idx], data.sv_health[idx], data.tgd[idx], data.iodc[idx])\n )\n # TODO: transmission_time depends on GNSS system time -> for BeiDou it has to be changed\n # TODO: fit_interval only valid for GPS and QZSS -> for BeiDou age_of_clock_corr; rest None\n fid.write(\n \"{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\\n\"\n \"\".format(data.transmission_time.gps.gpssec[idx], data.fit_interval[idx], 0.0, 0.0)\n )", "def write_file(date, num_days):\n month = '{0:02d}'.format(date.month)\n day = '{0:02d}'.format(date.day)\n\n with open(file_name, 'a+') as out_file:\n out_file.write('{}-{},{}\\n'.format(month, day, num_days))", "def test_rotation(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n days = [(self.path + \".\" + log.suffix(day * 86400)) for day in range(3)]\n\n # test automatic rotation\n log._clock = 0.0 # 1970/01/01 00:00.00\n log.write(\"123\")\n log._clock = 43200 # 1970/01/01 12:00.00\n log.write(\"4567890\")\n log._clock = 86400 # 1970/01/02 00:00.00\n log.write(\"1\" * 11)\n self.assertTrue(os.path.exists(days[0]))\n self.assertFalse(os.path.exists(days[1]))\n log._clock = 172800 # 1970/01/03 00:00.00\n log.write(\"\")\n self.assertTrue(os.path.exists(days[0]))\n self.assertTrue(os.path.exists(days[1]))\n self.assertFalse(os.path.exists(days[2]))\n log._clock = 259199 # 1970/01/03 23:59.59\n log.write(\"3\")\n self.assertFalse(os.path.exists(days[2]))", "def writeRecentItems(self, filename):\n if self.recent_items == None:\n return\n else:\n try:\n file_open = open(filename, 'w')\n text = \"\"\n for key in self.recent_items:\n full_path = self.recent_items[key] + key\n new_line = full_path + '\\n'\n text += new_line\n file_open.write(text)\n file_open.close()\n \n except IOError:\n print 'Unable to write the recent files to file: %s\\n'\\\n 'No recent files will be written' % str(filename)", "def write_history_file(config):\n readline.set_history_length(int(config.get('history', 'length')))\n readline.write_history_file(config.rh_get_data('historyFile'))", "def WriteDiary():\r\n from datetime import datetime\r\n\r\n diaryname = _getPOSCAR()\r\n diary = open(diaryname, \"w\")\r\n diary.write('***' + str(datetime.now()) + '***' + '\\n')\r\n diary.write('## ' + diaryname + '\\n')\r\n diary.close()\r\n _CopyWriteDiary('Readme', diaryname)\r\n _CopyWriteDiary('INCAR', diaryname)\r\n _CopyWriteDiary('KPOINTS', diaryname)\r\n _CopyWriteDiary('POSCAR', diaryname)\r\n _CopyWriteDiary('POTCAR', diaryname)\r\n os.rename(diaryname, diaryname + '.md')", "def write_to_file_z(path):\n path1 = path + \"/z_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x0y0z%ske%s.mac\" %(dz*z + z_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, \"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0 \\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n 
f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x0y0z%ske%s.root\"\\n' %(dz*z + z_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n f.write(\"/generator/pos/set 0 0 %s\\n\" % (dz*z + z_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")", "def getPreviousNightlyPath( numDaysInPast=1 ):\n\n myPath= os.environ.get(\"NICOS_PROJECT_RELNAME_COPY\",\"\")\n #replace rel_x with rel_(x-1)\n for i in range(0,7):\n if (\"rel_%d\" % i) in myPath:\n myPath = myPath.replace( (\"rel_%d\" % i), (\"rel_%d\" % ( (i-numDaysInPast)%7 )) )\n break\n refFile = os.environ.get(\"NICOS_COPY_HOME\",\"\") + \"/\" + myPath + \"/NICOS_area/NICOS_atntest\" + os.environ.get(\"NICOS_SUFFIX\",\"\") + \"/\" + os.path.basename(os.environ.get(\"ATN_WORK_AREA\",\"\"))\n\n return refFile", "def writing_get_date_ordered(file_name):\n result = str(reports.get_date_ordered(file_name))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")", "def print_to_file(start, stop, time_worked, work_text, work_log):\n today = datetime.date.today()\n\n record = ' || %.2f || %.2f || %.4f hours || %s\\n' % (start, stop, time_worked/3600, work_text)\n\n #if it is a new file you have the option to set a start time for the project\n # and how many hours a week you want to work\n if not os.path.isfile(work_log):\n while True:\n option = raw_input('\\nThis is a new log, would you like to specify a start date and a hours per week goal for the project? (y/n): ').lower()\n if option == 'y':\n date = raw_input('\\nplease enter the start date of the project (dd-mm-yyyy): ')\n hours_per_week = raw_input('\\nplease enter the number of hours you intend to work on the project per week: ')\n try:\n datetime.datetime.strptime(date, '%d-%m-%Y')\n if hours_per_week.isdigit():\n f = open(work_log, 'a')\n f.write('#! 
|| ' + date + ':' + hours_per_week + '\\n')\n f.close()\n break\n else:\n print \"\\nPlease enter a valid number for hours to work!\\n\"\n except ValueError:\n print \"\\nPlease enter a valid date!\\n\"\n\n else:\n break\n\n\n f = open(work_log, 'a')\n print '\\n\\n' + today.strftime('%b-%d-%Y') + record\n f.write(today.strftime('%b-%d-%Y') + record)\n f.close()", "def update_history(item, days, rental_price, total):\n transaction = '{}, {}, {}, {} \\n'.format(item, days, rental_price, total)\n with open('history.txt', 'a') as file:\n file.write(transaction)", "def date_diagnostic(scenario, dates):\n running = 0\n with open(\"scenario%s_dates.txt\" % (scenario,), \"w\") as fp:\n fp.write(\"date,total\\n\")\n for date in pd.date_range(APR15, MAY30):\n hits = [d for d in dates if d == date]\n running += len(hits)\n fp.write(\"%s,%s\\n\" % (date, running))", "def save_history():\n\n mid = get_mid()\n back_file = contact_name + \"_\" + today\n\n if not os.path.isdir(back_path):\n print('WARNING: o {} directory found, creating.').format(back_path)\n os.mkdir(back_path)\n else:\n print(\"OK: {} found.\".format(back_path))\n\n os.chdir(back_path)\n with open(back_file, 'w') as bf:\n for mes in get_todays_history(mid):\n data = \"{}\\n\".format(mes)\n bf.write(data)", "def archive(self):\n\n archive_date = self.time_file.file_date.strftime('%Y-%m-%d')\n self.record('ARCHIVE %s %s' % (archive_date,\n self.time_file.short_info()))\n\n self.keep_only_archive()", "def day3_case():\n print(\"Day 3 Start\")\n #print(\"Steps:\", day3.get_steps(325489))\n print(\"Steps:\", day3.get_steps(36807888888888))", "def output_into_file(self, path: str):\n # Creating path if not exist\n Path(path).mkdir(parents=True, exist_ok=True)\n # Writing every day as a csv file\n for day in self:\n with open(f\"{path}/{day.name}.csv\", \"w\") as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # First line / Title\n writer.writerow([\" \", day.name])\n for shift in day:\n employees = \", \".join([e.name for e in shift.employees])\n writer.writerow([f\"{shift.start}-{shift.end}\", employees])", "def call_orthologs(self):\n ortholog_call = oma.OrthologFinder(self.sequence)\n try:\n self.orthologs = ortholog_call.get_HOGs()\n except exceptions.RequestException:\n self.orthologs = ortholog_call.get_orthologs()\n\n with open(\"%s.orth\" %(self.name), 'w') as o_file:\n o_file.write(self.orthologs)\n\n return os.getcwd() + os.sep + '%s.orth'%(self.name)", "def create_e3d_file(self,path='./'):\n dt=0.606*self.model_parameters['dh']/np.max(self.velocity_model['vp']) # dt needs to satify the courant condition\n t=int(self.model_parameters['duration']/dt)\n \n # Check path exists, if not create one\n if not os.path.exists(path):\n os.makedirs(path)\n \n # Create e3d parameter file\n f=open('%s%s_e3dmodel.txt'%(path,self.model_name),'w')\n f.write(\"grid x=%s z=%s dh=%s b=2 q=1\\ntime dt=%0.5f t=%s\\n\"%(self.model_parameters['xmax'],self.model_parameters['zmax'],self.model_parameters['dh'],dt,t))\n f.write(\"block p=%s s=%s r=%s Q=20 Qf=50\\n\"%(self.velocity_model['vp'][0],self.velocity_model['vs'][0],self.velocity_model['rho'][0]))\n \n for i in range(1,len(self.velocity_model['vp'])-1):\n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 Qf=50\\n\"%(self.velocity_model['vp'][i],self.velocity_model['vs'][i],self.velocity_model['rho'][i],\n self.velocity_model['depth'][i],self.velocity_model['depth'][i+1]))\n \n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 
Qf=50\\n\\n\"%(self.velocity_model['vp'][i+1],self.velocity_model['vs'][i+1],self.velocity_model['rho'][i+1],\n self.velocity_model['depth'][i+1],self.model_parameters['zmax'])) # extend to the based of the model \n \n f.write(\"visual movie=5\\n\\n\")\n\n if self.source['src_type']!=4:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'])) \n else:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s Mxx=%s Myy=%s Mzz=%s Mxy=%s Mxz=%s Myz=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'],self.source['mt'][0],self.source['mt'][1],self.source['mt'][2],self.source['mt'][3],self.source['mt'][4],self.source['mt'][5])) \n\n for r in range(len(self.receivers['recxs'])):\n f.write('sac x=%0.3f z=%0.3f file=%s\\n'%(self.receivers['recxs'][r],self.receivers['reczs'][r],self.model_name))\n\n f.write(\"visual sample=0.1 movie=1 scale=10000000000/n\")\n f.close()\n \n print('File created: %s%s_e3dmodel.txt'%(path,self.model_name))", "def outputFunc(filename, parks,roading,private):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n writer.writerow(days)\n writer.writerow(parks)\n writer.writerow(roading)\n writer.writerow(private)\n finally:\n f.close()", "def main():\r\n\r\n directory = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n path = os.path.join(directory, 'dump_3')\r\n if not (os.path.exists(path)):\r\n os.mkdir(path)\r\n\r\n for date in range(1, 31):\r\n # date-month-year\r\n # file_name1 = path + '\\\\' + str(date) + '-8-2020' + '_file1.txt'\r\n\r\n # year-month-date\r\n # file_name1 = path + '\\\\' + '2020-08-' + str(date) + '_file3.txt'\r\n\r\n # month_year_date\r\n file_name1 = path + '\\\\' + 'Aug_2020_' + str(date) + '_file5.txt'\r\n\r\n # date-month-year\r\n # file_name2 = path + '\\\\' + str(date) + '-8-2020' + '_file2.txt'\r\n\r\n # year-month-date\r\n # file_name2 = path + '\\\\' + '2020-08-' + str(date) + '_file4.txt'\r\n\r\n # month_year_date\r\n file_name2 = path + '\\\\' + 'Aug_2020_' + str(date) + '_file6.txt'\r\n\r\n rows = []\r\n for row in range(100):\r\n string = 'asddfgfhgkhjghkweoriuywoipywbnxvnmznvnmbatr'\r\n rows.append(string)\r\n with open(file_name1, 'w') as f1, open(file_name2, 'w') as f2:\r\n f1.writelines(rows)\r\n f2.writelines(rows)", "def create_report(folderpath):\n\n outputfolder = create_folder(DEFAULT_OUTPUT_FOLDER)\n\n folderpath = os.path.expanduser(folderpath)\n updatesByHour = collections.defaultdict(list)\n\n now = datetime.now()\n\n for root, folders, files in os.walk(folderpath, followlinks=False):\n for filename in files:\n if filename not in IGNORE_THESE_FILES:\n filepath = pathlib.Path(root, filename)\n mtime = datetime.fromtimestamp(filepath.stat().st_mtime)\n\n if mtime.year == now.year and mtime.month == now.month:\n # For now only deal with this month\n mtime_str = mtime.strftime(\"%Y-%m-%d %H:00\")\n updatesByHour[mtime_str].append((root,filename))\n\n outputFilePath = pathlib.Path(outputfolder, now.strftime(\"%Y-%m.md\"))\n\n with open(outputFilePath, \"w\") as output_file:\n output_file.write(\"# \"+folderpath+\"\\n\")\n for updateTime in sorted(updatesByHour.keys()):\n output_file.write(\"## \"+updateTime+\"\\n\")\n previous_root = None\n previous_pattern=None\n s=\"\"\n for root, filename in sorted(updatesByHour[updateTime]):\n if not previous_root == root:\n # Print a 
Directory heading\n this_folder=root[len(folderpath):]\n if not len(this_folder.strip()):\n this_folder=folderpath\n output_file.write(\"### \"+this_folder+\" \\n\")\n this_pattern=re.sub(\"[0-9]\",\"x\",filename)\n if not previous_pattern==this_pattern:\n if len(s):\n listItem = \"* \" + s \n output_file.write(listItem[:-2]+\"\\n\")\n s=\"\"\n s=s+str(filename)+\", \"\n previous_root = root\n previous_pattern=this_pattern", "def _create_done_file(topdatadir, startdate, model_forcing):\n path = f\"{topdatadir}/cf_{model_forcing}_\"\n path += f\"{startdate.year:04d}{startdate.month:02d}/done\"\n pathlib.Path(path).touch()", "def save_history(cube, field, filename): \n\n history.append(cube.attributes['history'])", "def archive_log(self, f_in, filename):\n if not os.path.isdir('archived'):\n os.makedirs('archived')\n f_out = gzip.open('archived/'+filename+'.gz', 'wb')\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()", "def write_to_file(start_runtime, contents, write_mode='a'):\n with open(f\"{start_runtime}.txt\", write_mode) as f:\n f.write(\"Filename\\t\\tMaxTrack\\tNumInst\\t\\tTimeSig\\t\\tTPB\\n\")\n f.write(contents)", "def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)", "def write_to_file_x(path):\n path1 = path + \"/x_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x%sy0z0ke%s.mac\" %(dx*x + x_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, \"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0\\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x%sy0z0ke%s.root\"\\n' %(dx*x + x_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n f.write(\"/generator/pos/set %s 0 0\\n\" % (dx*x + x_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")", "def write_s3_file(data, date):\n logger.info(\"Writing history file to S3.\")\n bucket = os.getenv(\"SPOTIFY_BUCKET_NAME\")\n path = os.getenv(\"SPOTIFY_BUCKET_PATH\")\n s3 = boto3.client('s3')\n data = json.dumps(data)\n s3.put_object(Bucket=bucket, Key=\"%s/%s.json\" % (path, date), 
Body=data)", "def main(output, yesterday_option):\n if yesterday_option:\n path = output_yesterday()\n else:\n path = journal_today()\n\n edit_or_output(output=output, path=path)\n\n LOG.info('Otter Pilot journal reporting for duty!')", "def csvOutput(cycle, fctimes, beachdata, offshoredata, surfdata, fname='isurf_output.csv', outdir='.'):\n\n datestr = cycle.strftime('%Y%m%d00')\n\n with open(outdir+'/%s' %fname,'w') as outp:\n outp.write(datestr+'\\r\\n')\n for isite in range(len(beachdata['name'])):\n outp.write('\\r\\n')\n outp.write('%s' %beachdata['name'][isite] + '\\r\\n')\n outp.write('%d' %beachdata['type'][isite] + '\\r\\n')\n #outp.write('TI Hsmo Tpmo Dmo Hseq Tpeq DmEq Hsbr Dpbr\\r\\n')\n #outp.write('LT,Wspd,Wdir,Hsmo,Tpmo,Dmo,Tide,Hseq,Tpeq,DmEq,Hsbr,Dpbr,Hlbr,Hhbr,BT\\r\\n')\n outp.write('LT,Wspd,Wdir,Hsmo,Tpmo,Dmo,Hseq,Tpeq,DmEq,Hsbr,Dpbr,Hlbr,Hhbr,BT\\r\\n')\n\n\t # write out to file\n for itime in range(len(fctimes)):\n\n # write out the data values to file\n\t #outp.write ('%02d' %fctimes[lp] + ' %4.2f %4.1f %3d' %tuple([hm0[lp,isite], tp[lp,isite], dirn[lp,isite]]) + \\\n # ' %4.2f %4.1f %3d' %tuple([hsshwd[lp,isite], tpshwd[lp,isite], reldir[lp,isite]]) + ' %4.2f %4.2f' %tuple([hsbkinit[lp,isite], dpsat[lp,isite]]) + '\\r\\n')\n\t outp.write('%02d' %fctimes[itime] + \\\n ',%4.1f' %offshoredata['wspd'][itime,isite] + \\\n #',%3d' %offshoredata['wdir'][itime,isite] + \\\n ',%4.2f' %offshoredata['hm0'][itime,isite] + \\\n ',%4.1f' %offshoredata['tp'][itime,isite] + \\\n ',%3d' %offshoredata['dirn'][itime,isite] + \\\n ',%4.2f' %surfdata['shorewardHs'][itime,isite] + \\\n ',%4.1f' %surfdata['shorewardT'][itime,isite] + \\\n ',%3d' %surfdata['relativeDirn'][itime,isite] + \\\n ',%4.2f' %surfdata['breakerHs'][itime,isite] + \\\n ',%4.2f' %surfdata['saturatedDepth'][itime,isite] + \\\n ',%4.2f' %surfdata['Hb1in3'][itime,isite] + \\\n ',%4.2f' %surfdata['Hb1in10'][itime,isite] + \\\n ',%1d' %surfdata['breakerType'][itime,isite] + '\\r\\n')\n outp.close()" ]
[ "0.6438272", "0.53687155", "0.52514434", "0.5022941", "0.49938595", "0.49613258", "0.49171513", "0.48830613", "0.48462123", "0.48399088", "0.4824141", "0.4767896", "0.47020718", "0.46944073", "0.4677153", "0.46443874", "0.46366394", "0.45609674", "0.45609015", "0.4550348", "0.45437038", "0.4535608", "0.45273358", "0.44948754", "0.44934517", "0.44913235", "0.44817445", "0.44773877", "0.44701394", "0.446332" ]
0.71589833
0
Write RINEX navigation file for given date
def write_one_day(dset, date): brdc = apriori.get( "orbit", rundate=dset.analysis["rundate"], system=tuple(dset.unique("system")), station=dset.vars["station"], apriori_orbit="broadcast", ) meta = brdc.dset_edit.meta[date.strftime("%Y-%m-%d")] data = brdc.dset_edit # TODO: Another possibility: brdc.dset_raw file_vars = {**dset.vars, **dset.analysis} file_vars["doy"] = config.date_vars(date)[ "doy" ] # TODO: workaround, so that all files are written in the same working directory -> does not work if year is changed. with config.files.open("output_rinex2_nav", file_vars=file_vars, mode="wt") as fid: # # Write RINEX navigation header # if meta["file_type"] == "N": file_type = "NAVIGATION DATA" fid.write("{:>9s}{:11s}{:40s}RINEX VERSION / TYPE\n".format(meta["version"], "", file_type)) fid.write( "{:20s}{:20s}{:20s}PGM / RUN BY / DATE\n".format(meta["program"], meta["run_by"], meta["file_created"]) ) for line in meta["comment"]: fid.write("{:60s}COMMENT\n".format(line)) fid.write( "{:>14.4e}{:>12.4e}{:>12.4e}{:>12.4e}{:10s}ION ALPHA\n" "".format( meta["iono_para"]["GPSA"]["para"][0], meta["iono_para"]["GPSA"]["para"][1], meta["iono_para"]["GPSA"]["para"][2], meta["iono_para"]["GPSA"]["para"][3], "", ) ) fid.write( "{:>14.4e}{:>12.4e}{:>12.4e}{:>12.4e}{:10s}ION BETA\n" "".format( meta["iono_para"]["GPSB"]["para"][0], meta["iono_para"]["GPSB"]["para"][1], meta["iono_para"]["GPSB"]["para"][2], meta["iono_para"]["GPSB"]["para"][3], "", ) ) # TODO fid.write('{:>22.12e}{:>19.12e}{:>9d}{:>9d}{:1s}DELTA-UTC: A0,A1,T,W\n' # ''.format(meta['a0'], meta['a1'], int(meta['t']), int(meta['w']), '')) fid.write("{:>6d}{:54s}LEAP SECONDS\n".format(int(meta["leap_seconds"]["leap_seconds"]), "")) fid.write("{:60s}END OF HEADER\n".format("")) # # Write RINEX navigation data # # TODO: # for drow in data.get_rows(): # fid.write('{d.sat:2d} {d.time:%Y%m%d %H%M%S} {d.inc0:13.4f}'.format(d=drow)) # fid.write(' {d.hurra:14.10f} ...'.format(d=drow)) for idx in range(0, data.num_obs): sat = int(data.satellite[idx][1:3]) d = data.time.gps.datetime[idx] fid.write( "{:2d}{:>3s}{:>3d}{:>3d}{:>3d}{:>3d}{:>5.1f}{:>19.12e}{:>19.12e}{:>19.12e}\n" "".format( sat, str(d.year)[2:4], d.month, d.day, d.hour, d.minute, d.second, data.sat_clock_bias[idx], data.sat_clock_drift[idx], data.sat_clock_drift_rate[idx], ) ) fid.write( "{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\n" "".format(data.iode[idx], data.crs[idx], data.delta_n[idx], data.m0[idx]) ) fid.write( "{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\n" "".format(data.cuc[idx], data.e[idx], data.cus[idx], data.sqrt_a[idx]) ) # TODO: toe depends on GNSS system time -> for BeiDou it has to be changed fid.write( "{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\n" "".format(data.toe.gps.gpssec[idx], data.cic[idx], data.Omega[idx], data.cis[idx]) ) fid.write( "{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\n" "".format(data.i0[idx], data.crc[idx], data.omega[idx], data.Omega_dot[idx]) ) # TODO: gnss_week depends on GNSS -> for BeiDou it has to be changed # TODO: codes_l2 only valid for GPS and QZSS -> Galileo data_source; rest None # TODO: 'G': 'l2p_flag', 'J': 'l2p_flag' fid.write( "{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\n" "".format(data.idot[idx], data.codes_l2[idx], data.gnss_week[idx], data.l2p_flag[idx]) ) # TODO: 'G': 'iodc', 'J': 'iodc', 'E': 'bgd_e1_e5b', 'C': 'tgd_b2_b3' # TODO: 'G': 'tgd', 'J': 'tgd', 'E': 'bgd_e1_e5a', 'C': 'tgd_b1_b3', 'I': 'tgd' fid.write( "{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\n" "".format(data.sv_accuracy[idx], data.sv_health[idx], data.tgd[idx], data.iodc[idx]) ) # TODO: 
transmission_time depends on GNSS system time -> for BeiDou it has to be changed # TODO: fit_interval only valid for GPS and QZSS -> for BeiDou age_of_clock_corr; rest None fid.write( "{:22.12e}{:>19.12e}{:>19.12e}{:>19.12e}\n" "".format(data.transmission_time.gps.gpssec[idx], data.fit_interval[idx], 0.0, 0.0) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rinex2_nav(dset):\n for date_offset in range(-1, 2):\n write_one_day(dset, dset.analysis[\"rundate\"] + timedelta(days=date_offset))", "def write_file(date, num_days):\n month = '{0:02d}'.format(date.month)\n day = '{0:02d}'.format(date.day)\n\n with open(file_name, 'a+') as out_file:\n out_file.write('{}-{},{}\\n'.format(month, day, num_days))", "def WriteDiary():\r\n from datetime import datetime\r\n\r\n diaryname = _getPOSCAR()\r\n diary = open(diaryname, \"w\")\r\n diary.write('***' + str(datetime.now()) + '***' + '\\n')\r\n diary.write('## ' + diaryname + '\\n')\r\n diary.close()\r\n _CopyWriteDiary('Readme', diaryname)\r\n _CopyWriteDiary('INCAR', diaryname)\r\n _CopyWriteDiary('KPOINTS', diaryname)\r\n _CopyWriteDiary('POSCAR', diaryname)\r\n _CopyWriteDiary('POTCAR', diaryname)\r\n os.rename(diaryname, diaryname + '.md')", "def writing_get_date_ordered(file_name):\n result = str(reports.get_date_ordered(file_name))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")", "def save_history():\n\n mid = get_mid()\n back_file = contact_name + \"_\" + today\n\n if not os.path.isdir(back_path):\n print('WARNING: o {} directory found, creating.').format(back_path)\n os.mkdir(back_path)\n else:\n print(\"OK: {} found.\".format(back_path))\n\n os.chdir(back_path)\n with open(back_file, 'w') as bf:\n for mes in get_todays_history(mid):\n data = \"{}\\n\".format(mes)\n bf.write(data)", "def add_date(self, date):\n with open(self.data_filepath, 'a', newline='') as writef:\n writer = csv.writer(writef)\n writer.writerow([date.freeze()])\n self._file_modified = True", "def create_newfile():\n date = datetime.today().strftime('%d_%m_%Y').replace(\" \", \"_\")\n file_name = screen_name + '_' + date + \".json\"\n with io.FileIO(file_name, \"w\") as file:\n file.write(\"Json\")\n file.close()\n return file_name", "def save_to_file(result, date):\n try:\n os.mkdir('/Users/yueyang/Downloads/serp-626-75-json', mode=0o744)\n except FileExistsError:\n # print('Directory already exists.')\n pass\n\n filename = '{0}.json'.format(date) #datetime.today().strftime('%m-%d-%Y'), query)\n with open(os.path.join('/Users/yueyang/Downloads/serp-626-75-json', filename), 'w') as f:\n json.dump(result, f, indent=4)\n print('Saved search results to {0}'.format(f.name))", "def date_to_filename(base_path, raw_date_string):\n raw_date_string = raw_date_string[:-1]\n month, day, year = raw_date_string.split(\"/\")\n relative_path = \"{}/{}/{}.md\".format(year, month, day)\n return base_path / relative_path", "def archive(self):\n\n archive_date = self.time_file.file_date.strftime('%Y-%m-%d')\n self.record('ARCHIVE %s %s' % (archive_date,\n self.time_file.short_info()))\n\n self.keep_only_archive()", "def to_file(self, filename):\n self.header['n'] = self.n\n save_gyre(filename, self.header, self.data)", "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "def output_into_file(self, path: str):\n # Creating path if not exist\n Path(path).mkdir(parents=True, exist_ok=True)\n # Writing every day as a csv file\n for day in self:\n with open(f\"{path}/{day.name}.csv\", \"w\") as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # First line / Title\n writer.writerow([\" \", day.name])\n for shift in day:\n employees = \", \".join([e.name for e in shift.employees])\n writer.writerow([f\"{shift.start}-{shift.end}\", employees])", "def writeRecentItems(self, filename):\n if 
self.recent_items == None:\n return\n else:\n try:\n file_open = open(filename, 'w')\n text = \"\"\n for key in self.recent_items:\n full_path = self.recent_items[key] + key\n new_line = full_path + '\\n'\n text += new_line\n file_open.write(text)\n file_open.close()\n \n except IOError:\n print 'Unable to write the recent files to file: %s\\n'\\\n 'No recent files will be written' % str(filename)", "def gen_filename_from_date(path,date,autoincrement = True):\n \n fname = date.isoformat().replace(':','.')\n \n if autoincrement:\n\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f)) and f!='.DS_Store']\n \n found_numbers = [int(f.strip('.html').split('_')[1]) for f in onlyfiles if fname == f[0:len(fname)] ]\n \n highest = -1 \n if len(found_numbers)>0:\n highest = max(found_numbers)\n \n return \"{}/{}_{}.html\".format(path,fname,highest+1)", "def download(progid, date):\n logger = log.getLogger('obslog.download')\n\n if not re.match(r'^G[NS]-[0-9]{4}[AB]-([CQ]|DD|FT|LP|SV)-[0-9]{0,3}$', progid):\n logger.error('This does not look like a program ID: %s', progid)\n raise SystemExit\n\n obslog = date + '_' + progid + '_obslog.txt'\n url = 'https://archive.gemini.edu/file/' + obslog\n logger.debug('URL: %s', url)\n logger.info('Downloading %s...', obslog)\n urllib.urlretrieve(url, obslog)\n return", "def _create_releaseinfo_file(projname, relinfo_str):\n dirs = projname.split('.')\n os.chdir(os.path.join(*dirs))\n print 'updating releaseinfo.py for %s' % projname\n with open('releaseinfo.py', 'w') as f:\n f.write(relinfo_str)", "def write_history_file(config):\n readline.set_history_length(int(config.get('history', 'length')))\n readline.write_history_file(config.rh_get_data('historyFile'))", "def write_file(file,dir_name):\n opened_file = open(dir_name + '/%s'%file,'w')\n opened_file.write('<?xml version=\"1.0\"?>\\n')\n return opened_file", "def savedoc():\r\n document.save('QSDoc_{0}_{1}_{2}_{3}.docx'.format(args.server, year, month, day))", "def export_calendar(calendar, filename):\n filename = verify_filename(filename)\n try:\n with open(filename, 'w') as file:\n file.writelines(calendar)\n print(f\"File {filename} created in current directory.\")\n except OSError:\n print(f\"Could not open/read {filename}\")\n sys.exit(1)", "def write(self, scanrecpath=None):\n if not scanrecpath:\n scanrecpath = self.get_scanrecpath()\n self.write_scanrec(scanrecpath)\n for obs_id in self.obsinfos:\n self.obsinfos[obs_id].write_ldat_header(scanrecpath)", "def write_new_date(self) -> None:\n\n with open(str(os.path.join(THIS_DIR, \"data_file.json\")), mode='r') as json_file:\n data = json.load(json_file)\n data[self.site] = str(self.get_last_image_date())\n json_file.close()\n with open(str(os.path.join(THIS_DIR, \"data_file.json\")), mode='w') as json_file:\n json.dump(data, json_file)\n json_file.close()", "def write(self, outfile, rebasings=None):\r\n raise NotImplementedError()", "def write_to_file(self, filename: str) -> None:", "def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)", "def create_file(date, title, text, n):\r\n \"\"\"with date as file name and text as 
content\"\"\"\r\n filename = \"%s_%s.txt\" % (date, n)\r\n with io.open(filename, \"w+\", encoding=\"UTF8\") as newfile:\r\n text = text.replace(\" \", \"\") #remove all spaces\r\n sentences= re.sub(\",|。\", \"\\n\", text) #one sentence per line\r\n newfile.write(title+\"\\n\")\r\n newfile.write(date+\"\\n\")\r\n newfile.write(sentences)\r\n print(filename)", "def write_to_file(fib_details: dict):\n pass # TODO: Replace with implementation!", "def write_xml_file(path, version, jars):\n with open(path, \"wt\") as f:\n f.write(\"<version>%s</version>\\n\" % version)\n f.write(\"<archives>\")\n for j in jars:\n f.write(\"%s\\n\" % j)\n f.write(\"</archives>\")", "def filepath(day, ind):\n if ind!=\"TradeReport\" and ind!=\"OrderDetail\" and ind!=\"OrderHistory\":\n raise NameError(' ind must be either TradeReport or OrderDetail')\n \n elif day<1 or day>31 or type(day)!=int:\n raise TypeError('day must be an integer between 1 and 31')\n \n if day<10:\n day=\"0\"+str(day)\n else:\n day=str(day)\n \n path=\"/data/LSE_DATA/raw/T_\" + ind + \"_\"+ day +\"012008.csv/\" + \"t_\" + ind +\".csv\"\n\n return path" ]
[ "0.681492", "0.58828324", "0.5515422", "0.5325432", "0.52559066", "0.5186145", "0.514998", "0.5144424", "0.5138187", "0.5133589", "0.5105928", "0.5087973", "0.50841504", "0.50436205", "0.4952118", "0.49071655", "0.48948997", "0.48907757", "0.48899305", "0.48826486", "0.48811275", "0.48787796", "0.48784703", "0.48737976", "0.48483974", "0.48469964", "0.4838353", "0.48375854", "0.4825812", "0.48230988" ]
0.6964463
0
Calculate some simple statistics about users of sessions
def action_session_user_stats(args, config, db, wdb): wdb.execute('''CREATE OR REPLACE VIEW analysis_session_users AS (SELECT DISTINCT analysis_session_requests.session_id as session_id, analysis_requestlog_combined.user_sid as user_sid FROM analysis_requestlog_combined, analysis_session_requests WHERE analysis_requestlog_combined.id = analysis_session_requests.request_id ) ''') wdb.commit() # How many sessions did each user have? wdb.execute('''CREATE OR REPLACE VIEW analysis_session_count_per_user AS ( SELECT analysis_session_users.user_sid, count(analysis_session_users.session_id) as session_count FROM analysis_session_users, user WHERE analysis_session_users.user_sid = user.user_name GROUP BY analysis_session_users.user_sid );''') wdb.commit() user_ids = db.simple_query('SELECT user_sid FROM analysis_session_users') sessions_per_user = collections.Counter(user_ids) sessions_per_user['anonymous'] = sessions_per_user[None] del sessions_per_user[None] write_data('user_session_counts', { 'data': dict(sessions_per_user.most_common()), }) reverse_counts = collections.Counter( sessions_per_user.values()).most_common() write_data('user_session_counts_reverse', { 'data': list(reverse_counts), })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_stats(df):\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n # TO DO: Display counts of user types\r\n df = ['user type'].value_counts()\r\n print('count of user typs:\\n')\r\n # TO DO: Display counts of gender\r\n df = ['grnder'].value_counts()\r\n if 'Gender' in df:\r\n print('count of gender:\\n')\r\n # TO DO: Display earliest, most recent, and most common year of birth\r\n year = df['birth year'].value_counts()\r\n if 'birth year' in df:\r\n print('earliset birth year is:{year.min()}\\nmost recent is: {year.max()}\\nand most common birth year is: (year.mode()[0]')\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def stats_users(app_id):\r\n users = {}\r\n auth_users = []\r\n anon_users = []\r\n\r\n # Get Authenticated Users\r\n sql = text('''SELECT task_run.user_id AS user_id,\r\n COUNT(task_run.id) as n_tasks FROM task_run\r\n WHERE task_run.user_id IS NOT NULL AND\r\n task_run.user_ip IS NULL AND\r\n task_run.app_id=:app_id\r\n GROUP BY task_run.user_id ORDER BY n_tasks DESC\r\n LIMIT 5;''')\r\n results = db.engine.execute(sql, app_id=app_id)\r\n\r\n for row in results:\r\n auth_users.append([row.user_id, row.n_tasks])\r\n\r\n sql = text('''SELECT count(distinct(task_run.user_id)) AS user_id FROM task_run\r\n WHERE task_run.user_id IS NOT NULL AND\r\n task_run.user_ip IS NULL AND\r\n task_run.app_id=:app_id;''')\r\n results = db.engine.execute(sql, app_id=app_id)\r\n for row in results:\r\n users['n_auth'] = row[0]\r\n\r\n # Get all Anonymous Users\r\n sql = text('''SELECT task_run.user_ip AS user_ip,\r\n COUNT(task_run.id) as n_tasks FROM task_run\r\n WHERE task_run.user_ip IS NOT NULL AND\r\n task_run.user_id IS NULL AND\r\n task_run.app_id=:app_id\r\n GROUP BY task_run.user_ip ORDER BY n_tasks DESC;''')\r\n results = db.engine.execute(sql, app_id=app_id)\r\n\r\n for row in results:\r\n anon_users.append([row.user_ip, row.n_tasks])\r\n\r\n sql = text('''SELECT COUNT(DISTINCT(task_run.user_ip)) AS user_ip FROM task_run\r\n WHERE task_run.user_ip IS NOT NULL AND\r\n task_run.user_id IS NULL AND\r\n task_run.app_id=:app_id;''')\r\n results = db.engine.execute(sql, app_id=app_id)\r\n\r\n for row in results:\r\n users['n_anon'] = row[0]\r\n\r\n return users, anon_users, auth_users", "def user_stats(df):\n\n print('\\n#4 USER INFO\\nCalculating User Stats...\\n')\n start_time = time.time()\n \n # TO DO: Display counts of user types\n print('Count of each User type:')\n print(df['User Type'].value_counts(dropna=False))\n \n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n print('\\nCount of each Gender type:')\n print(df['Gender'].value_counts(dropna=False))\n\n \n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('\\nBirth Year Statistics:')\n print(df['Birth Year'].value_counts(sort=True).head(1))\n print(df['Birth Year'].min())\n print(df['Birth Year'].max())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('Trip Count By User Type:')\n for index, value in zip(df['User Type'].value_counts().index, df['User Type'].value_counts().values):\n print(index, '=', value)\n\n\n # Display counts of gender\n if 'Gender' in df.columns:\n print()\n print('Trip Count By Gender:')\n for index, value in zip(df['Gender'].value_counts().index, 
df['Gender'].value_counts().values):\n print(index, '=', value)\n print()\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('Earliest Year of Birth:', df['Birth Year'].min())\n print('Most Recent Year of Birth:', df['Birth Year'].max())\n print('Most Common Year of Birth:', df['Birth Year'].mode()[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n try:\n print('By User Type:')\n print(df['User Type'].value_counts())\n except KeyError:\n print('User type data is not available')\n\n # TO DO: Display counts of gender\n try:\n print('\\nBy Gender:')\n print(df['Gender'].value_counts())\n except KeyError:\n print('Gender data is not available.')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n try:\n print('\\nBy Birth year:')\n print(f\"The earliest user birth year was: {int(df['Birth Year'].min())}\")\n print(f\"The most recent user birth year was: {int(df['Birth Year'].max())}\")\n print(f\"The most common user birth year was: {int(df['Birth Year'].mode()[0])}\")\n except KeyError:\n print('Birth year data is not available.')\n\n print(f'\\nThis took {time.time() - start_time}s seconds.')\n print('-'*40)", "def _compute_user_stats():\n user_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n for user in wmt16_users:\n _user_stats = HIT.compute_status_for_user(user)\n _name = user.username\n _avg_time = seconds_to_timedelta(_user_stats[1])\n _total_time = seconds_to_timedelta(_user_stats[2])\n _data = (_name, _user_stats[0], _avg_time, _total_time)\n \n if _data[0] > 0:\n user_stats.append(_data)\n \n # Sort by total number of completed HITs.\n user_stats.sort(key=lambda x: x[1])\n user_stats.reverse()\n \n return user_stats", "def user_stats(df):\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: Display counts of user types\r\n user_types =df['User Type'].value_counts()\r\n print(user_types)\r\n\r\n\r\n # TO DO: Display counts of gender\r\n Gender =df['Gender'].value_counts()\r\n print(Gender)\r\n\r\n\r\n # TO DO: Display earliest, most recent, and most common year of birth\r\n print('Earliest year of birth:\\n', df['Birth Year'].min())\r\n print('Most recent year of birth:\\n', df['Birth Year'].max())\r\n print('Most common year of birth:\\n', df['Birth Year'].mean())\r\n\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n print(df['User Type'].value_counts())\n print('\\n\\n')\n\n # TO DO: Display counts of gender\n if 'Gender' in(df.columns):\n print(df['Gender'].value_counts())\n print('\\n\\n')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in(df.columns):\n year = df['Birth Year'].fillna(0).astype('int64')\n print(f'Earliest birth year is: {year.min()}\\nmost recent is: {year.max()}\\nand most common birth year is: {year.mode()[0]}')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def get_stats(self):\n result = {\n 'datetime': dt.datetime.now().strftime('%d.%m.%Y %H:%M:%S'),\n 'total': db.session.query(User). 
\\\n count(),\n 'unverified': db.session.query(User). \\\n filter(db.not_(User.verified)). \\\n count(),\n 'male students': db.session.query(User). \\\n filter(User.sex == Sex.Male,\n User.type == UserType.Student). \\\n count(),\n 'male employees': db.session.query(User). \\\n filter(User.sex == Sex.Male,\n User.type == UserType.Employee). \\\n count(),\n 'male alumni': db.session.query(User). \\\n filter(User.sex == Sex.Male,\n User.type == UserType.Alumni). \\\n count(),\n 'female students': db.session.query(User). \\\n filter(User.sex == Sex.Female,\n User.type == UserType.Student). \\\n count(),\n 'female employees': db.session.query(User). \\\n filter(User.sex == Sex.Female,\n User.type == UserType.Employee). \\\n count(),\n 'female alumni': db.session.query(User). \\\n filter(User.sex == Sex.Female,\n User.type == UserType.Alumni). \\\n count()\n }\n\n return result", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n if \"User Type\" in df:\n print(\"User Types are:\\n\", df[\"User Type\"].value_counts() ,'\\n')\n else:\n print (\"No Information available for User's types.\\n\")\n \n # Display counts of gender\n if \"Gender\" in df:\n print(\"User's Gender are as following:\\n\", df[\"Gender\"].value_counts() ,'\\n')\n else:\n print ( \"No Information available for User's gender.\\n\")\n \n # Display earliest, most recent, and most common year of birth\n if \"Birth Year\" in df:\n print(\"User's most common year of birth is:\\n\", int(df[\"Birth Year\"].value_counts().idxmax()) ,'\\n')\n print('The oldest user birth date:\\n', int(df[\"Birth Year\"].min()) ,'\\n')\n print('The youngest user birth date:\\n', int(df[\"Birth Year\"].max()),'\\n')\n else:\n print ( \"No Information available for User's birth Year.\\n\")\n \n print('-'*40)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n j = df['User Type'].value_counts()\n print('What is the breakdown of users\\n', j)\n print()\n # TO DO: Display counts of gender\n k = df['Gender'].value_counts()\n print('What is the breakdown of genders?\\n', k)\n print()\n # TO DO: Display earliest, most recent, and most common year of birth\n l = df['Birth Year'].max()\n m = df['Birth Year'].min()\n n = df['Birth Year'].mode()[0]\n print('What is the oldest, youngest, and most popular year of birth, respectively?')\n print(int(l), int(m), int(n))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"Counts of user types: \\n{}\\n\".format(df[\"User Type\"].value_counts()))\n\n # Display counts of gender\n if 'Gender' in df:\n print(\"Counts of user types: \\n{}\\n\".format(df['Gender'].value_counts()))\n else:\n print(\"Given data doesn't contain gender data.\\n\")\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df:\n print(\"The earliest birth year is: {}.\".format(df[\"Birth Year\"].min()))\n print(\"The most recent birth year is: {}.\".format(df[\"Birth Year\"].max()))\n print(\"The most common birth year is: {}.\".format(df[\"Birth Year\"].mode()[0]))\n else:\n print(\"Given data doesn't contain birth year data.\\n\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n 
print('-' * 40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n type_count = df['User Type'].value_counts()\n print('Counts of User Types: ', type_count)\n\n # TO DO: Display counts of gender\n gender_count = df['Gender'].value_counts()\n print('\\nCounts of Genders: ', gender_count)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n earliest_year = int(df['Birth Year'].min())\n recent_year = int(df['Birth Year'].max())\n common_year = int(df['Birth Year'].mode()[0])\n print('\\nEarliest Birth Year: ', earliest_year)\n print('Most Recent Birth Year: ', recent_year)\n print('Most Common Birth Year: ', common_year)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n \n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n print('counts of user types : {}'.format(len(df['User Type'].unique())))\n\n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n print('counts of gender : {}'.format(len(df['Gender'].unique())))\n else:\n print('Gender information not available')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('counts of earliest, most recent, and most common year of birth : {}'.format(df['Birth Year'].max()))\n else:\n print('Earliest, most recent, and most common year of birth information not available')\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('User type counts:')\n print(df['User Type'].value_counts())\n\n # Display counts of gender\n print('User gender counts:')\n try:\n print(df['Gender'].value_counts())\n except:\n print('This file has no gender data')\n\n # Display earliest, most recent, and most common year of birth\n print('User birth year:')\n try:\n earliest = min(df['Birth Year'])\n most_recent = max(df['Birth Year'])\n most_common = df['Birth Year'].value_counts().index.tolist()[0]\n print('Birth Years:\\nEarliest: {}\\nMost Recent: {}\\nMost Common: {}'\n .format(earliest, most_recent, most_common))\n except:\n print('This file has no birth year data')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_type = df[\"User Type\"].value_counts()\n print(\"These are the user types requested: \",user_type)\n\n # TO DO: Display counts of gender\n gender = df[\"Gender\"].value_counts()\n print(\"These are the genders requested: \",gender)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n early_year = df[\"Birth Year\"].min()\n print(\"The earliest year of birth for this filtered set is: \", int(early_year))\n \n recent_year = df[\"Birth Year\"].max()\n print(\"The most recent year of birth for this set is: \",int(recent_year))\n \n common_year = df[\"Birth Year\"].mode()\n print(\"The most common year of birth is: \",int(common_year))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"Counts of user types:\\n\")\n user_counts = df['User Type'].value_counts()\n # printing out the total numbers 
of user types\n for index, user_count in enumerate(user_counts):\n print(\" {}: {}\".format(user_counts.index[index], user_count))\n\n # Display counts of gender", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n # TO DO: Display counts of user types\n countUserType = df['User Type'].value_counts()\n print(f\"The counts of user types: {countUserType}\")\n\n # TO DO: Display counts of gender\n if 'Gender' in df:\n countGender = df['Gender'].value_counts()\n print(f\"The counts of gender: {countGender}\")\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df:\n earliest = int(df['Birth Year'].min())\n recent = int(df['Birth Year'].max())\n common_year = int(df['Birth Year'].mode()[0])\n print(f\"\\nThe earliest year of birth: {earliest}\\n\\nThe most recent year of birth: {recent}\\n\\nThe most common year of birth: {common_year}\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n try:\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print(user_types)\n print('\\n')\n # Display counts of gender\n gender = df['Gender'].value_counts()\n print(gender)\n print('\\n')\n # Display earliest, most recent, and most common year of birth\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n except:\n print('Sorry there was an error whiles processing your request')", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n a = len(df['User Type'].unique())\n print('counts of user types', a)\n\n # TO DO: Display counts of gender\n b = len(df['Gender'].unique())\n print('counts of gender', b)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n max = df['Birth Year'].max()\n min = df['Birth Year'].min()\n common = df['Birth Year'].mode()[0]\n print('earliest of birth is %s, most recent of birth is %s, and most common year of birth is %s' % (min, max, common))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df.groupby(['User Type']).sum()\n print('The total number of different user types are: \\n', user_types)\n\n # Display counts of gender\n gender = df.groupby(['Gender']).sum()\n print('The total number of different user types are: ', gender)\n\n # Display earliest, most recent, and most common year of birth\n earliest_birthyear = df['Birth Year'].min()\n print('The first birthyear is: ', earliest_birthyear)\n\n most_recent_birthyear = df['Birth Year'].max()\n print('The last birthyear is: ', most_recent_birthyear)\n\n most_common_birthyear = df['Birth Year'].mode()\n print('The most common birthyear is: ', most_common_birthyear)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print(\"The counts of user types are: {}\".format(user_types))\n\n # Display counts of gender\n if \"Gender\" in df.columns:\n gender = df['Gender'].value_counts()\n print(\"The counts of gender are: {}\".format(gender))\n else:\n print(\"Unavailable\")\n\n # 
Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n earliest_birth = df[\"Birth Year\"].min()\n print(\"The earliest year of birth is: {}\".format(earliest_birth))\n recent_birth = df['Birth Year'].max()\n print(\"The recent year of birth is: {}\".format(recent_birth))\n common_birth = df['Birth Year'].mode()\n print(\"The common year of birth is: {}\".format(common_birth))\n else:\n print(\"Unavailable\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"What is the breakdown of users?\")\n # print value counts for each user type\n user_types = df['User Type'].value_counts()\n if user_types is None:\n print(\"No user type data to share.\")\n else:\n print(user_types)\n\n # Display counts of gender\n print(\"\\nWhat is the breakdown of gender?\")\n # print value counts for each gender\n if 'Gender' in df.columns:\n gender = df['Gender'].value_counts()\n\n if gender is None:\n print(\"No gender data to share.\")\n else:\n print(gender)\n else:\n print(\"No gender data to share.\")\n\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print(\"\\nEarliest Year Of Birth:\", df['Birth Year'].min())\n print(\"\\nMost Recent Year Of Birth:\", df['Birth Year'].max())\n print(\"\\nMost Common Year Of Birth:\", df['Birth Year'].mode().values[0])\n else:\n print(\"\\nNo birth year data to share.\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Type'].value_counts()\n print('The number of subscribers and customers are:', user_types)\n \n # TO DO: Display counts of gender (Male / Female / Unknown)\n if 'Gender' in df: # perform gender related calculation\n gender = df['Gender'].value_counts()\n print('The number of males and females is:', gender)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df: # perform gender related calculation\n earliest_year = df['Birth Year'].min()\n print('The earliest year of birth is', earliest_year)\n\n recent_year = df['Birth Year'].max()\n print('The most recent year of birth is', recent_year)\n\n common_year = df['Birth Year'].mode()[0]\n print('The most common year of birth is', common_year)\n\n print(\"\\nRunning this code took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n print('\\nUser Type:')\n print(df['User Type'].value_counts())\n\n # TO DO: Display counts of gender\n if 'Gender' not in df:\n print(\"\\nI'm sorry, there is no gender data for this city.\")\n else:\n print('\\nGender Type:')\n print(df['Gender'].value_counts())\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' not in df:\n print('\\nData related to birth year of users is not available for this city.')\n else:\n birth = df.groupby('Birth Year', as_index=False).count()\n print('\\nEarliest year of birth was {}.'.format(int(birth['Birth Year'].min())))\n print('Most recent year of birth was {}.'.format(int(birth['Birth Year'].max())))\n print('Most common year of birth year was 
{}.'.format(int(birth.iloc[birth['Start Time'].idxmax()]['Birth Year'])))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n num_types=df['User Type'].groupby(df['User Type']).count()\n print(num_types)\n\n # Display counts of gender\n num_gender=df['Gender'].groupby(df['Gender']).count()\n print(num_gender)\n\n # Display earliest, most recent, and most common year of birth\n b_year=df['Birth Year']\n print(\"earliest year of birth :{}\".format(b_year.min()))\n print(\"most recent year of birth : {}\".format(b_year.max()))\n print(\"most common year of birth : {}\".format(b_year.mode()[0]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # Display counts of user types\r\n print('Counts of user types: ')\r\n print(df['User Type'].value_counts())\r\n\r\n # Display counts of gender and handle Washington.csv missing gender column\r\n if 'Gender' in df.columns:\r\n print('Counts of gender: ')\r\n print(df['Gender'].value_counts())\r\n else:\r\n print('No Gender Data Available.\\n')\r\n\r\n # Display earliest, most recent, and most common year of birth and handle Washington.csv missing gender column\r\n #earliest year of birth\r\n if 'Birth Year' in df.columns:\r\n print('Earliest birth year: ')\r\n print(df['Birth Year'].min())\r\n #most recent year of birth\r\n print('Most recent birth year: ')\r\n print(df['Birth Year'].max())\r\n #most common year of birth\r\n print('Most common birth year: ')\r\n print(df['Birth Year'].mode()[0])\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n else:\r\n print('No Birth Year Data Available.\\n')", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n utcounts = df[\"User Type\"].value_counts()\n print(\"The counts for each user type are:\\n\", utcounts, sep = \"\")\n\n # Display counts of gender\n gencounts = df[\"Gender\"].value_counts()\n print(\"\\nThe counts for each gender are:\\n\", gencounts, sep = \"\")\n\n # Display earliest, most recent, and most common year of birth\n by_earliest = int(df[\"Birth Year\"].min())\n by_mostrec = int(df[\"Birth Year\"].max())\n by_common = int(df[\"Birth Year\"].mode()[0])\n print(\"\\nThe earliest, most recent, and most common year of birth, respectively are:\", by_earliest, by_mostrec, by_common)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Type'].value_counts()\n if city == 'washington':\n print(\"That info isn't available.\")\n break\n else:\n print(\"\\nUser types are: \", user_types)\n\n # TO DO: Display counts of gender\n gender = df['Gender'].value_counts()\n if city == 'washington':\n print(\"That info isn't available\")\n break\n else:\n print(\"\\nThe breakdown of gender is: \", gender)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n oldest_birth=np.nanmin(df['Birth Year'])[0]\n print('\\nOldest birth year is', int(oldest_birth))\n\n youngest_birth=np.nanmax(df['Birth Year'])[0]\n print('\\nYoungest birth year is', int(youngest_birth))\n\n 
common_birth=df['Birth Year'].mode()[0]\n print('\\nMost common birth year is', int(common_birth))", "def userstats(request):\r\n with ReqAuthorize(request):\r\n user = UserMgr.get(username=request.user.username)\r\n return {\r\n 'user': user,\r\n 'username': user.username,\r\n }" ]
[ "0.7254027", "0.7163929", "0.7048962", "0.7035427", "0.70172447", "0.6989257", "0.69802827", "0.6969384", "0.6962817", "0.69367063", "0.6914038", "0.68993825", "0.6896018", "0.6889866", "0.68877953", "0.68837255", "0.68703", "0.68660045", "0.68443763", "0.6824871", "0.6811637", "0.6811055", "0.680757", "0.68006474", "0.67958313", "0.67935777", "0.6791923", "0.675206", "0.6746323", "0.67369664" ]
0.78045124
0
if user asks for an item that's unavailable, they should not be given the item, and their money should be returned
def test_unavailable_item(self): item, change, _ = give_item_and_change('crisps', .50) self.assertIsNone(item) self.assertEqual(change, 0.5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unavailable_item(self):\n item, change, _ = give_item_and_change('crisps', '1.00 .50')\n self.assertIsNone(item)\n self.assertEqual(change, 1.35)", "def test_unavailabe_items(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def test_amount_not_enough(self):\n item, change, _ = give_item_and_change('coke', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def return_item(self,reason):\n if reason == \"defective\":\n self.status = \"defective\"\n self.price = 0\n elif reason == \"unopened\":\n self.status = \"for sale\"\n else:\n self.status = \"used\"\n self.price -= (.20 * self.price)\n return self", "def pay_for_item(self, item):\n while self.amount < item.price:\n paid_amount = float(input(f\"Pay €{round((item.price - self.amount), 2)} : \"))\n if paid_amount <= 0:\n custom_log(\"Invalid amount entered.\", MSG_ERROR)\n continue\n self.amount = self.amount + paid_amount", "def inspect_inventory(sell=False):\r\n choice = 'poop'\r\n\r\n if sell:\r\n while choice != 'done':\r\n choices = list(player.get_inventory())\r\n choices += ['done']\r\n choice = helpful.pick_item(choices,'Sell something?','done')\r\n # if choice == 'done':\r\n if str(choice) == 'mythical kumquat':\r\n raw_input(\"You can't sell your \" + str(choice) + \"!\\n\")\r\n elif choice == 'done':\r\n return\r\n else:\r\n cost = choice.get_cost()\r\n question = 'Sell your ' + str(choice) + ' for $' + str(cost) + '?'\r\n sell_yn = helpful.pick_item(['yes','no'],question)\r\n if sell_yn == 'yes':\r\n cost = choice.get_cost()\r\n player.gain_money(cost)\r\n player.drop(choice)\r\n raw_input('You sold your ' + str(choice) + '. ' + \\\r\n \"That's $\" + str(cost) + ' more in your pocket.\\n')\r\n\r\n else: #if not selling\r\n while choice != 'done':\r\n choices = list(player.get_inventory())\r\n choices += ['done']\r\n intro = 'Type item name/number for more info...\\n\\nInventory:' \r\n choice = helpful.pick_item(choices,intro,'done')\r\n if choice == 'done':\r\n return\r\n raw_input(choice.advanced_str())\r\n if choice.get_health() > 0:\r\n use_yn = helpful.pick_item(['yes','no'],'Use this item?')\r\n if use_yn == 'yes':\r\n player.use(choice)", "def option_two():\n if ADD_PRODUCTS == {}:\n print \"\\n**No products availabe**\" #Cannot to buy\n press_enter()\n reset()\n main_menu()\n else:\n ask_if_want()", "def select_item_noreq(self, request):\n item = Item.objects.get(id=request.POST['item_id'])\n if not item.can_be_borrowed():\n return self.init_and_toast(\"ERROR: The item is not available\")\n\n html = render_to_string(\"include/hardware_user_email.html\", {\n 'item_id': item.id\n })\n return JsonResponse({\n 'content': html\n })", "def do_use(self, arg):\r\n itemToUse = arg.lower()\r\n \r\n if itemToUse == '':\r\n print('Use what? Type \"inv\" to see the items in your invetory.')\r\n return\r\n \r\n cantUse = False\r\n \r\n #look up the item the player describes\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n if itemToUse not in invDescWords:\r\n print('You do not have that item to use it')\r\n return\r\n \r\n for item in getAllItemsMatchingDesc(itemToUse, inventory):\r\n if worldItems[item].get(USEABLE, True) == False:\r\n cantUse = True\r\n continue\r\n print('%s' % (worldItems[item][USEDESCTRUE]))\r\n #print('You use %s' % (worldItems[item][SHORTDESC]))\r\n #inventory.remove(item) \r\n return\r\n \r\n if cantUse:\r\n print('You cannot use \"%s\".' 
% (itemToUse))\r\n else:\r\n print('You do not have that item to use.')", "def price(usr, item, searches = 2, method = \"AVERAGE\", deduct = 0):\n if not method in ShopWizard.methods: raise invalidMethod()\n\n if isinstance(item, Item):\n item = item.name\n\n prices = []\n dets = {}\n for x in range(0, searches):\n print('performing search: {}'.format(x))\n results = ShopWizard.search(usr, item)\n\n # Set to -1 if not found\n if not results:\n prices.append(-1)\n continue\n\n prices.append(int(results[item].price))\n dets[str(results[item].price)] = (results[item].owner, results[item].id)\n\n time.sleep(ShopWizard.waitTime)\n\n # Determines if item was UB\n print(prices)\n if sum(prices) == len(prices) * -1:\n return False\n\n prices = list(filter(lambda x: x != -1, prices))\n\n if method == ShopWizard.RETLOW:\n price = sorted(prices)[0]\n return (price, dets[str(price)][0], dets[str(price)][1])\n\n return ShopWizard.__determinePrice(prices, method, deduct)", "def check_restrictions(self):\n from .signals import determine_availability\n\n responses = determine_availability.send(\n self.item.event, item=self.item,\n variations=[self.to_variation_dict()], context=None,\n cache=self.item.event.get_cache()\n )\n price = self.default_price if self.default_price is not None else self.item.default_price\n for receiver, response in responses:\n if 'available' in response[0] and not response[0]['available']:\n return False\n elif 'price' in response[0] and response[0]['price'] is not None and response[0]['price'] < price:\n price = response[0]['price']\n return price", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def check_money(drink, amount):\n if (drink == \"espresso\" and amount < MENU[drink][\"cost\"]) or (drink == \"latte\" and amount < MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount < MENU[drink][\"cost\"]):\n # if not enough money, start over\n print(f\"Sorry that's not enough money. Drink is ${MENU[drink]['cost']}. You gave ${amount}. Money refunded.\")\n return False\n else:\n return True", "def donate(self):\n\n # Get item\n import converter\n self.hero.inventory_menu()\n item = prompt(\"Select a weapon, shield or armor to donate. Or \\\npress enter to exit. 
\").lower()\n item = converter.convert(item)\n\n # If item is a weapon, shield or armor, accept the donation\n if isinstance(item, items.Weapon) or isinstance(item, items.Shield) or isinstance(item, items.Armor):\n if item in self.hero.inventory:\n self.donations.append(item)\n self.hero.drop(item)\n self.sort_donations()\n prompt(\"\\\"Thank you for your donation.\\\"\")\n else:\n prompt(\"You don't have one!\")\n\n # If item is a real item but is not in the above classes, do not accept.\n elif item != False:\n prompt(\"That type of item is not needed.\")", "def get_user_noreq(self, request):\n item = Item.objects.get(id=request.POST['item_id'])\n target_user = User.objects.filter(email=request.POST['email'])\n if not target_user.exists():\n # In this case we don't want to return to the initial page\n return JsonResponse({\n 'msg': \"ERROR: The user doesn't exist\"\n })\n if not item.can_be_borrowed():\n return self.init_and_toast(\"ERROR: The item is not available\")\n\n borrowing = Borrowing(user=target_user.first(), item=item, borrowing_by=request.user)\n borrowing.save()\n return self.init_and_toast(\"The item has been borrowed succesfully\")", "def get_items_not_in_stock():\n try:\n return get_items_arent_on_stock(), 200\n except:\n return \"An error ocurred\", 404", "def test_NegativePriceCheck(self):\n # Basic price check\n self.log.info(\"Price checking Negative Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Negative Item\")\n \n # Confirm the right item, at the right price\n # NOTE: Price check returns negative prices as possitive. Legacy defect deemed 'Will Not Fix'\n self.read_price_check(\"Negative Item\", \"$5.00\")\n # Add the item\n pos.click(\"Sell Item\")\n \n # Confirm we are in a transaction\n if not self.in_transaction():\n self.tc_fail(\"POS did not start a transaction; can not confirm item was added\")\n else:\n self.log.info(\"Confirmed we are in a transaction\")\n \n # Confirm we added the item, and that it was negative\n ret = self.confirm_line(-1, \"Negative Item\", \"-$5.00\")\n if ret == True:\n self.log.info(\"Confirmed item added\")\n else:\n self.tc_fail(ret)\n \n # Setup for next test\n self.recover()", "def amount_entered():\n while True: #Run until a suitable input is passed.\n try:\n amt = int(input(\"Enter value you wish to trade >>> \"))\n if amt <= 0:\n raise Exception\n return amt\n except ValueError: #if a string is entered\n print(\"Please enter an integer\")\n except Exception: #if a negative digit is entered\n print(\"Value cannot be less than or equal to 0\")", "def check_for_offer(self, bid, commodity, limit, actual, quantity, price):\n if bid:\n if len(self.trades[\"buys\"][commodity]) == 0:\n return 0\n else: # tally up how much trying to buy.\n total = 0.0\n total_price = 0.0\n for offer in self.trades[\"buys\"][commodity]:\n total += offer.quantity\n total_price += offer.price\n\n avg_price = total_price / len(self.trades[\"buys\"][commodity])\n\n # if total < limit:\n # #PLACE MORE BIDS.\n return total\n\n else:\n if len(self.trades[\"asks\"][commodity]) == 0:\n return 0\n else: # tally up how much trying to buy.\n total = 0.0\n total_price = 0.0\n for offer in self.trades[\"asks\"][commodity]:\n total += offer.quantity\n total_price += offer.price\n\n avg_price = total_price / len(self.trades[\"asks\"][commodity])\n #\n # if total < limit:\n # #PLACE MORE asks.\n # return total\n # if total < limit:\n # #PLACE MORE asks.\n return total # - limit", "def test_discard_buy(self):\n self.plr.test_input = [\"finish 
selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def prompt_user_money_to_withdrawl():\n print('What amount of money do you want to withdrawl?:')\n return input()", "async def _bailout_heist(self, ctx, user: discord.Member=None):\r\n author = ctx.message.author\r\n theme = await self.thief.get_guild_theme(ctx.guild)\r\n\r\n t_bail = theme[\"Bail\"]\r\n t_sentence = theme[\"Sentence\"]\r\n\r\n if user is None:\r\n player = author\r\n else:\r\n player = user\r\n\r\n if await self.thief.get_member_status(player) != \"Apprehended\":\r\n return await ctx.send(\"{} is not in jail.\".format(player.display_name))\r\n\r\n cost = await self.thief.get_member_bailcost(player)\r\n if not await bank.get_balance(player) >= cost:\r\n await ctx.send(\"You do not have enough to afford the {} amount.\".format(t_bail))\r\n return\r\n\r\n if player.id == author.id:\r\n msg = (\"Do you want to make a {0} amount? It will cost {1} credits. If you are \"\r\n \"caught again, your next {2} and {0} amount will triple. \"\r\n \"Do you still wish to pay the {0} amount?\".format(t_bail, cost, t_sentence))\r\n else:\r\n msg = (\"You are about pay a {2} amount for {0} and it will cost you {1} credits. \"\r\n \"Are you sure you wish to pay {1} for {0}?\".format(player.name, cost, t_bail))\r\n\r\n await ctx.send(msg)\r\n response = await self.bot.wait_for('MESSAGE', timeout=15, check=lambda x: x.author == author)\r\n\r\n if response is None:\r\n await ctx.send(\"You took too long. canceling transaction.\")\r\n return\r\n\r\n if \"yes\" in response.content.lower():\r\n msg = (\"Congratulations {}, you are free! Enjoy your freedom while it \"\r\n \"lasts...\".format(player.display_name))\r\n await bank.withdraw_credits(author, cost)\r\n await self.thief.set_member_free(author)\r\n await self.thief.set_member_oob(author, False)\r\n elif \"no\" in response.content.lower():\r\n msg = \"Canceling transaction.\"\r\n else:\r\n msg = \"Incorrect response, canceling transaction.\"\r\n\r\n await ctx.send(msg)", "def buy_item(self, item):\n if self.amount < item.price:\n custom_log(\"Insufficient amount. 
Insert more coins.\", MSG_ERROR)\n else:\n self.amount = round((self.amount - item.price), 2)\n item._buy()\n custom_log(f\"You bought - {item.name}, remaining cash - €{self.amount}\", MSG_DEBUG)", "def get_price():\n\n while (True):\n price = input(\"Enter the purchase price (xx.xx) or 'q' to quit: \")\n if(price.capitalize() == 'Q'):\n return -1\n elif price.replace('.', '').isdigit() and not is_valid(price):\n print(\"Illegal price: Must be a non-negative multiple of 5 cents.\\n\")\n elif not price.replace('.', '').isdigit():\n print(\"Illegal entry: Must be a price like (1.75) or 'q' for quit.\\n\")\n else:\n return float(price)", "def is_sufficient(money_received, price):\n if price <= money_received:\n change = round(money_received - price, 2)\n print(f\"Here is your {option}.Enjoy!\\nHere us £{change} in change\")\n global profit\n profit += price\n return True\n else:\n print(f\"Sorry not enough money\")\n return False", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_request_item_not_found(self):\n r = Requester( self.logger )\n ( search_key, search_value ) = ( 'ISBN', self.isbn_not_found )\n result_dct = r.request_item(\n self.patron_barcode, search_key, search_value, self.pickup_location, self.api_url_root, self.api_key, self.partnership_id, self.university_code )\n self.assertEqual(\n {'Problem': {'ErrorCode': 'PUBRI003', 'ErrorMessage': 'No result'}}, result_dct )", "def action_p2p(self, event, search_item=None, *args, **kwargs):\n player = kwargs.get('player') or event.player_name\n price = int(kwargs.get('price', self.P2P_SELLER_DEFAULT_PRICE))\n\n try:\n items = [int(kwargs['sid'])]\n except (KeyError, ValueError):\n items = self.bot.inventory.find_search_items_from_names(search_item)\n\n self.log.debug(items)\n\n if len(items) > 1:\n self.chat.send_message(\\\n gettext('You need to be more specific as the following items match:'),\n event=event\n )\n\n for item in items:\n try:\n name = EmrossWar.ITEM[str(item)]['name']\n except KeyError:\n name = gettext('Unknown item')\n\n self.chat.send_message(gettext('sid={0}, name={1}').format(\\\n item, name),\n event=event\n )\n\n self.chat.send_message(\\\n gettext('You could try using the item number instead eg. 
sid=1234'),\n event=event\n )\n return\n\n sellable_item = None\n\n for item in items:\n for item_id, data in self.bot.inventory.data[item].iteritems():\n try:\n if int(data['lockinfo']['locked']) == 1:\n self.chat.send_message(gettext('That item is locked for {0}!').format(\\\n self.bot.human_friendly_time(data['lockinfo']['secs'])), event=event)\n continue\n except KeyError:\n pass\n\n if int(data['sale']) > 0:\n sellable_item = item_id\n break\n\n if not sellable_item:\n self.chat.send_message(gettext(\"I couldn't find that item, no deal!\"), event=event)\n return\n\n city = self.bot.richest_city()\n cost = price * (self.SELLING_FEE / 100)\n\n if city.resource_manager.meet_requirements({Resource.GOLD: cost}, **kwargs):\n result = self.sell_item(city, sellable_item, price, player.encode('utf8'), event=event)\n\n if result == EmrossWar.SUCCESS:\n self.chat.send_message(gettext(\"Don't forget to buy that item, you hear?\"), event=event)\n else:\n self.chat.send_message(gettext(\"Something didn't go to plan..\"), event=event)\n else:\n self.chat.send_message(gettext('That would cost me too much!'), event=event)", "def purchase(self, item_type):", "def equip(self):\n item_name = input(\"What item do you want to equip?\\n>\")\n if item_name in self.backpack:\n item = self.backpack[item_name]\n else:\n return \"You don't have this\"\n if item.type in self.equipped:\n self.equipped[item.type] = item\n if item.type == \"Weapon\":\n self.strength = item.strength\n return f\"You have equipped {item.name} on {item.type} item slot\"\n else:\n return \"You can not equip this\"" ]
[ "0.7092874", "0.6525508", "0.6484838", "0.64494467", "0.6339492", "0.62511873", "0.6181593", "0.6174058", "0.61449325", "0.6115728", "0.61110276", "0.6052567", "0.60428303", "0.60210925", "0.6020287", "0.60103595", "0.59621745", "0.59602994", "0.595624", "0.5945737", "0.594224", "0.5916034", "0.59072566", "0.59061974", "0.589347", "0.588972", "0.588855", "0.58703727", "0.5846385", "0.5841097" ]
0.7022732
1
Find the rotation matrix that will rotate a onto b.
def find_rotation(a, b): a.shape = (3,) b.shape = (3,) a /= np.linalg.norm(a) b /= np.linalg.norm(b) v = np.cross(a, b) angle_AB = -1*vector_angle(a, b) print(angle_AB) s = np.linalg.norm(v) * np.sin(angle_AB) c = np.dot(a, b) * np.cos(angle_AB) # Rotation matrix, R = I + Vx + Vx^2 * (1-c)/s^2 I = np.identity(3) Vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]) R = I + Vx + np.linalg.matrix_power(Vx, 2) / (1+c) return R
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_rotation(a, b):\n if not np:\n raise PysimmError('pysimm.calc.find_rotation function requires numpy')\n a = np.array(a)\n b = np.array(b)\n\n a_x_b = np.cross(a, b)\n axis = a_x_b / np.linalg.norm(a_x_b)\n theta = acos(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))\n\n skew = np.matrix([[0, -axis[2], axis[1]],\n [axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]])\n\n rot_matrix = np.identity(3) + sin(theta) * skew + (1 - cos(theta)) * skew * skew\n return rot_matrix", "def CombineRotation(a, b):\n # Use matrix multiplication: c = b*a.\n # We put 'b' on the left and 'a' on the right because,\n # just like when you use a matrix M to rotate a vector V,\n # you put the M on the left in the product M*V.\n # We can think of this as 'b' rotating all the 3 column vectors in 'a'.\n\n return RotationMatrix([\n [\n b.rot[0][0]*a.rot[0][0] + b.rot[1][0]*a.rot[0][1] + b.rot[2][0]*a.rot[0][2],\n b.rot[0][1]*a.rot[0][0] + b.rot[1][1]*a.rot[0][1] + b.rot[2][1]*a.rot[0][2],\n b.rot[0][2]*a.rot[0][0] + b.rot[1][2]*a.rot[0][1] + b.rot[2][2]*a.rot[0][2]\n ],\n [\n b.rot[0][0]*a.rot[1][0] + b.rot[1][0]*a.rot[1][1] + b.rot[2][0]*a.rot[1][2],\n b.rot[0][1]*a.rot[1][0] + b.rot[1][1]*a.rot[1][1] + b.rot[2][1]*a.rot[1][2],\n b.rot[0][2]*a.rot[1][0] + b.rot[1][2]*a.rot[1][1] + b.rot[2][2]*a.rot[1][2]\n ],\n [\n b.rot[0][0]*a.rot[2][0] + b.rot[1][0]*a.rot[2][1] + b.rot[2][0]*a.rot[2][2],\n b.rot[0][1]*a.rot[2][0] + b.rot[1][1]*a.rot[2][1] + b.rot[2][1]*a.rot[2][2],\n b.rot[0][2]*a.rot[2][0] + b.rot[1][2]*a.rot[2][1] + b.rot[2][2]*a.rot[2][2]\n ]\n ])", "def rotation_to_align_a_with_b(a, b):\n norm_a = np.linalg.norm(a)\n norm_b = np.linalg.norm(b)\n if not np.allclose(a, a/norm_a):\n print('Input a vector not unit normal - normalising')\n a = a / norm_a\n print(a)\n if not np.allclose(b, b/norm_b):\n print('Input b vector not unit normal - normalising')\n b = b / norm_b\n print(b)\n\n v = np.cross(a,b)\n #s = np.linalg.norm(v)\n c = np.dot(a,b)\n f = 1./(1. 
+ c)\n vmat = np.array([[ 0, -v[2], v[1]],\n [ v[2], 0, -v[0]],\n [-v[1], v[0], 0]])\n return np.eye(3,3) + vmat + f *(np.matmul(vmat,vmat))", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot", "def get_mgc_rotation(side_a, side_b):\n # Can be reused when building the MST\n k_rotations_a = 0\n k_rotations_b = 0\n mgc_specific_relation = None\n piece_swap = False\n\n # No rotation required as MGC works with Right -> Left and Bottom -> Top relations correctly\n if side_a == RIGHT:\n k_rotations_a = 0\n mgc_specific_relation = RIGHT_LEFT\n k_rotations_b = k_rotational[side_a][side_b]\n if side_a == BOTTOM:\n k_rotations_a = 0\n mgc_specific_relation = BOTTOM_TOP\n k_rotations_b = k_rotational[side_a][side_b]\n\n if side_a == LEFT:\n if side_b == RIGHT:\n # Pretty much switch positions and that will be all\n piece_swap = True\n k_rotations_a = 0\n k_rotations_b = 0\n else:\n # Make the LEFT to be RIGHT\n # Adjust side_b to become LEFT\n k_rotations_a = 2\n k_rotations_b = k_rotational[side_a][side_b]\n mgc_specific_relation = RIGHT_LEFT\n if side_a == TOP:\n if side_b == BOTTOM:\n # Pretty much switch positions and that will be all\n piece_swap = True\n k_rotations_a = 0\n k_rotations_b = 0\n else:\n # Make the TOP side to be BOTTOM\n # Adjust side_b to become TOP\n k_rotations_a = 2\n k_rotations_b = k_rotational[side_a][side_b]\n mgc_specific_relation = BOTTOM_TOP\n return k_rotations_a, k_rotations_b, mgc_specific_relation, piece_swap", "def matrix_angle( B, A ):\n Aflat = A.reshape(-1)\n Aflat = unit_vector(Aflat)\n Bflat = B.reshape(-1)\n Bflat = unit_vector(Bflat)\n #return np.arccos((np.dot( Aflat, Bflat ) / max( np.linalg.norm(Aflat) * np.linalg.norm(Bflat), 1e-10 )))\n return np.arccos(np.clip(np.dot(Aflat, Bflat), -1.0, 1.0))", "def getOblateXRotMatrix(aStar1, aStar2):\n aStarDir = aStar2 - a1\n aStarmid = aStar1 + 0.5 * aStarDir\n kath = np.sqrt((aStarDir[0] * aStarDir[0] + aStarDir[1] * aStarDir[1]) / 4.0)\n phi = np.arctan( abs( (aStarDir[2]/2) / kath) )\n octantAStar2 = octant(aStar2)\n if octantAStar2 in [1, 2, 7, 8]: #\n phi = -phi\n print \"phi =\" , np.rad2deg(phi)\n RotX = np.matrix( [ [ 1.0, 0.0 , 0.0 ],\n [ 0.0, np.cos(phi), np.sin(phi)],\n [ 0.0, -np.sin(phi), np.cos(phi)]\n ])\n return np.asarray( RotX )", "def _findrotationmatrix(ccdata1, ccdata2):\n natoms = ccdata1.natom\n J = np.zeros((3, 3), dtype=np.float)\n\n for i in range(natoms):\n J += np.outer(ccdata1.atomcoords[0][i], ccdata2.atomcoords[0][i])\n\n U, s, V = np.linalg.svd(J)\n\n R = np.transpose(np.dot(V, np.transpose(U)))\n\n return R", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def rotmat(a, b, c, hom_coord=False): # apply to mesh using mesh.apply_transform(rotmat(a,b,c, 
True))\n def z(a):\n return np.array([[np.cos(a), np.sin(a), 0, 0],\n [-np.sin(a), np.cos(a), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n def y(a):\n return np.array([[np.cos(a), 0, np.sin(a), 0],\n [0, 1, 0, 0],\n [-np.sin(a), 0, np.cos(a), 0],\n [0, 0, 0, 1]])\n\n r = z(a).dot(y(b)).dot(z(c)) # pylint: disable=E1101\n if hom_coord:\n return r\n else:\n return r[:3, :3]", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def get_scan_rotation_matrix(mount_angle, base_angle):\n alpha = 0 # rotation about y (roll)\n # negate mount_angle\n beta = np.deg2rad(-mount_angle) # rotation about x (pitch, mount_angle)\n gamma = np.deg2rad(base_angle) # rotation about z (yaw, base_angle)\n return tf.euler_matrix(alpha, beta, gamma, 'sxyz')", "def getOrientationVect(self, a,b):\r\n return np.array(a)-np.array(b)", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def orientToXYZR( a, b ):\n if allclose(a,b):\n return (0,1,0,0)\n an,bn = normalise( (a,b) )\n angle = arccos(dot(an,bn))\n x,y,z = crossProduct( a, b )[0]\n if allclose( (x,y,z), 0.0):\n y = 1.0\n return (x,y,z,angle)", "def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )", "def find_best_rotation(q1, q2, allow_reflection = False, only_xy = False):\n if q1.ndim != 2 or q2.ndim != 2:\n raise Exception(\"This only supports curves of shape (N,M) for N dimensions and M samples\")\n\n n = q1.shape[0]\n\n # if only_xy, strip everything but the x and y coordinates of q1 and q2\n if only_xy:\n _q1 = q1[0:2, :]\n _q2 = q2[0:2, :]\n else:\n _q1 = q1\n _q2 = q2\n\n _n = _q1.shape[0]\n A = _q1@_q2.T\n U, s, Vh = svd(A)\n S = eye(_n)\n\n # if reflections are not allowed and the determinant of A is negative,\n # then the entry corresponding to the smallest singular value is negated\n # as in the Kabsch algorithm\n if det(A) < 0 and not allow_reflection:\n S[-1, -1] = -1 # the last entry of the matrix becomes -1\n\n _R = U@S@Vh # optimal\n \n # if only_xy, the top left block of the matrix is _R and the rest is identity matrix\n if only_xy:\n R = eye(n)\n R[0:2, 0:2] = _R\n else:\n R = _R\n \n q2new = R@q2\n\n return (q2new, R)", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def vrrotvec(a,b):\n a = normalize(a)\n b = normalize(b)\n ax = normalize(np.cross(a,b))\n angle = np.arccos(np.minimum(np.dot(a,b),[1]))\n if not np.any(ax):\n absa = np.abs(a)\n mind = np.argmin(absa)\n c = np.zeros((1,3))\n c[mind] = 0\n ax = normalize(np.cross(a,c))\n r = np.concatenate((ax,angle))\n return r", "def next_rotation(q_1: Q, q_2: Q) -> Q:\n q_1.check_representations(q_2)\n\n if not math.isclose(q_1.t, q_2.t):\n raise ValueError(f\"Oops, to be a rotation, the first values must be the same: {q_1.t} != {q_2.t}\")\n\n if not math.isclose(norm_squared(q_1).t, 
norm_squared(q_2).t):\n raise ValueError(f\"Oops, the norm squared of these two are not equal: {norm_squared(q_1).t} != {norm_squared(q_2).t}\")\n\n next_rot = product(q_1, q_2)\n v_abs_q_1 = abs_of_vector(q_1).t\n next_vector_normalized = normalize(vector_q(next_rot), v_abs_q_1)\n next_vector_normalized.t = q_1.t\n\n return next_vector_normalized", "def plummer_rotation(r,b,M,G=astronomicalG):\n return np.sqrt(2*G*np.power(10.,M)*r*r*np.power(b*b+r*r,-1.5))", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def _get_transformation_matrix(rotation, shear, height_zoom, width_zoom, height_shift, width_shift):\n\n # CONVERT DEGREES TO RADIANS\n rotation = math.pi * rotation / 180.\n shear = math.pi * shear / 180.\n\n # ROTATION MATRIX\n c1 = tf.math.cos(rotation)\n s1 = tf.math.sin(rotation)\n one = tf.constant([1], dtype='float32')\n zero = tf.constant([0], dtype='float32')\n rotation_matrix = tf.reshape(tf.concat([\n c1, s1, zero, -s1, c1, zero, zero, zero, one\n ], axis=0), [3, 3])\n\n # SHEAR MATRIX\n c2 = tf.math.cos(shear)\n s2 = tf.math.sin(shear)\n shear_matrix = tf.reshape(tf.concat([\n one, s2, zero, zero, c2, zero, zero, zero, one\n ], axis=0), [3, 3])\n\n # ZOOM MATRIX\n zoom_matrix = tf.reshape(tf.concat([\n one/height_zoom, zero, zero, zero, one/width_zoom, zero, zero, zero, one\n ], axis=0), [3, 3])\n\n # SHIFT MATRIX\n shift_matrix = tf.reshape(tf.concat([\n one, zero, height_shift, zero, one, width_shift, zero, zero, one\n ], axis=0), [3, 3])\n\n return keras.backend.dot(\n keras.backend.dot(rotation_matrix, shear_matrix),\n keras.backend.dot(zoom_matrix, shift_matrix))", "def rotMatrix( source = None ):\n if source is None:\n return None,None\n else:\n (x,y,z, a) = source\n if a % TWOPI:\n return tmatrixaccel.rotMatrix( x,y,z,a ),tmatrixaccel.rotMatrix( x,y,z,-a )\n return None,None", "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def _get_rotation_matrix(transform):\n # caution: UE4 is using left-hand ortation order\n roll = np.deg2rad(-transform.rotation.roll)\n pitch = np.deg2rad(-transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n sr, cr = np.sin(roll), np.cos(roll)\n sp, cp = np.sin(pitch), np.cos(pitch)\n sy, cy = np.sin(yaw), np.cos(yaw)\n rotation_matrix = np.array([[cy * cp, -sy * sr + cy * sp * sr, cy * sp * cr + sy * sr],\n [sy * cp, cy * sp * sr + cy * sr, -cy * sr + sy * sp * cr],\n [-sp, cp * sr, cp * cr]])\n return rotation_matrix", "def lookup_rotation(source_frame, target_frame, tf_listener = None):\n\n # Check the tf_listener and create new one if None\n if tf_listener is None:\n tf_listener = tf.TransformListener()\n\n # Get the transforamtion from baselink to frame\n (trans,rot) = tf_listener.lookupTransform(source_frame, target_frame, rospy.Time(0))\n\n # Compute dot product\n d = sum([a * b for (a,b) in zip([0,-1],trans)])\n d = d / math.sqrt(sum([a ** 2 for a in trans[0:2]]))\n\n return math.acos(d)", "def testCalculateRotationDiff(self):\n # Test identity\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertEqual(result, 0.0)\n # Test arbitrary rotation\n rot1 = numpy.array(\n [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n rot2 = numpy.array(\n [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])\n transform1[0:3, 0:3] = numpy.matmul(transform1[0:3, 0:3], rot1)\n transform2[0:3, 0:3] = numpy.matmul(transform2[0:3, 0:3], 
rot2)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Order shouldn't matter\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Test when the angle is pi\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n transform2[0, 0] = -1.0\n transform2[1, 1] = -1.0\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n # It might wrap to -pi, so check the absolute value\n self.assertAlmostEqual(abs(result), numpy.pi, 8)\n # Test an extreme value\n transform2 = -1.0 * numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(abs(result), numpy.pi)", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def compute_RotMats(a, e, t):\n assert len(a)==len(e)==len(t)\n M = len(a)\n\n # camera intrinsic matrix\n Rz = np.zeros((M, 3, 3), dtype=np.float32)\n Rx = np.zeros((M, 3, 3), dtype=np.float32)\n Rz2 = np.zeros((M, 3, 3), dtype=np.float32)\n # C = np.zeros((M, 1, 3), dtype=np.float32)\n # initial \"1\" positions.\n Rz [:, 2, 2] = 1\n Rx [:, 0, 0] = 1\n Rz2[:, 2, 2] = 1\n #\n R = np.zeros((M, 3, 3), dtype=np.float32)\n\n # convert to radius\n a = a * pi / 180.\n e = e * pi / 180.\n t = t * pi / 180.\n\n # update a, e, t\n a = -a\n e = pi/2.+e\n t = -t\n #\n sin_a, cos_a = np.sin(a), np.cos(a)\n sin_e, cos_e = np.sin(e), np.cos(e)\n sin_t, cos_t = np.sin(t), np.cos(t)\n\n # ===========================\n # rotation matrix\n # ===========================\n \"\"\"\n # [Transposed]\n Rz = np.matrix( [[ cos(a), sin(a), 0 ], # model rotate by a\n [ -sin(a), cos(a), 0 ],\n [ 0, 0, 1 ]] )\n # [Transposed]\n Rx = np.matrix( [[ 1, 0, 0 ], # model rotate by e\n [ 0, cos(e), sin(e) ],\n [ 0, -sin(e), cos(e) ]] )\n # [Transposed]\n Rz2= np.matrix( [[ cos(t), sin(t), 0 ], # camera rotate by t (in-plane rotation)\n [-sin(t), cos(t), 0 ],\n [ 0, 0, 1 ]] )\n R = Rz2*Rx*Rz\n \"\"\"\n\n # Original matrix (None-transposed.)\n # No need to set back to zero?\n Rz[:, 0, 0], Rz[:, 0, 1] = cos_a, -sin_a\n Rz[:, 1, 0], Rz[:, 1, 1] = sin_a, cos_a\n #\n Rx[:, 1, 1], Rx[:, 1, 2] = cos_e, -sin_e\n Rx[:, 2, 1], Rx[:, 2, 2] = sin_e, cos_e\n #\n Rz2[:, 0, 0], Rz2[:, 0, 1] = cos_t, -sin_t\n Rz2[:, 1, 0], Rz2[:, 1, 1] = sin_t, cos_t\n # R = Rz2*Rx*Rz\n R[:] = np.einsum(\"nij,njk,nkl->nil\", Rz2, Rx, Rz)\n\n # Return the original matrix without transpose!\n return R" ]
[ "0.8111493", "0.7199534", "0.6795931", "0.656168", "0.65125513", "0.6504331", "0.6497843", "0.64215285", "0.63962364", "0.61423236", "0.61377865", "0.6086032", "0.60602015", "0.60542876", "0.60507715", "0.6009267", "0.5892665", "0.58833164", "0.58649683", "0.5860341", "0.5802039", "0.5785634", "0.57711583", "0.576608", "0.576296", "0.57447475", "0.5737334", "0.57063335", "0.5680408", "0.56621873" ]
0.79258054
1
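The record above pairs the query about rotating vector a onto vector b with a Rodrigues-style implementation whose vector_angle helper is not included in the row. For reference, a minimal self-contained sketch of the standard Rodrigues construction R = I + [v]x + [v]x^2 * (1 - c) / s^2 (illustrative function name, assuming only NumPy; this is not the stored snippet itself) is:

import numpy as np

def rotation_matrix_a_to_b(a, b, eps=1e-12):
    # Rodrigues construction: R = I + [v]x + [v]x^2 * (1 - c) / s^2,
    # with v = a x b, c = a . b, s = |v|, for unit vectors a and b.
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    a = a / np.linalg.norm(a)
    b = b / np.linalg.norm(b)
    v = np.cross(a, b)
    c = float(np.dot(a, b))
    s = float(np.linalg.norm(v))
    if s < eps:
        if c > 0.0:
            return np.eye(3)  # a and b already aligned
        # opposite vectors: rotate by pi about any axis perpendicular to a
        u = np.eye(3)[int(np.argmin(np.abs(a)))]
        u = u - np.dot(u, a) * a
        u = u / np.linalg.norm(u)
        return 2.0 * np.outer(u, u) - np.eye(3)
    vx = np.array([[0.0, -v[2], v[1]],
                   [v[2], 0.0, -v[0]],
                   [-v[1], v[0], 0.0]])
    return np.eye(3) + vx + (vx @ vx) * ((1.0 - c) / (s * s))

For example, rotation_matrix_a_to_b([1, 0, 0], [0, 1, 0]) gives the 90-degree rotation about the z-axis, and in general R @ a reproduces the unit vector along b up to floating-point error.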
Method to find the rotation that takes points from m1 onto points in m2. Uses singular value decomposition algorithm taken from Nghia Ho,
def SVD_rotate(m1, m2): assert m1.shape[0] == m2.shape[0] # Find the centroids of m1, m2 centroid1 = np.mean(m1, axis=0) centroid2 = np.mean(m2, axis=0) # Build the covariance matrix H = np.dot((m1 - centroid1).T, (m2 - centroid2)) U, S, V = np.linalg.svd(H) # Middle matrix is to ensure that matrix yields a rotation, not reflection R = np.dot(V.T, np.array([ [1,0,0] , [0,1,0], [0,0, np.linalg.det(np.dot(V.T,U.T))] ]) ) R = np.dot(R, U.T) # Find translation t = -np.dot(R, centroid1) + centroid2 return (R, t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _findrotationmatrix(ccdata1, ccdata2):\n natoms = ccdata1.natom\n J = np.zeros((3, 3), dtype=np.float)\n\n for i in range(natoms):\n J += np.outer(ccdata1.atomcoords[0][i], ccdata2.atomcoords[0][i])\n\n U, s, V = np.linalg.svd(J)\n\n R = np.transpose(np.dot(V, np.transpose(U)))\n\n return R", "def compute_error_minimizing_rotation(Points1, Points2):\r\n #TODO: implement me\r\n\r\n H_1_1 = 0\r\n H_1_2 = 0\r\n H_2_1 = 0\r\n H_2_2 = 0\r\n\r\n for t in range(1, len(Points1)):\r\n H_1_1 = H_1_1 + (Points1[t][0] * Points2[t][0])\r\n H_1_2 = H_1_2 + (Points1[t][1] * Points2[t][0])\r\n H_2_1 = H_2_1 + (Points1[t][0] * Points2[t][1])\r\n H_2_2 = H_2_2 + (Points1[t][1] * Points2[t][1])\r\n\r\n H = [[H_1_1,H_1_2],[H_2_1,H_2_2]]\r\n\r\n U, S, V = numpy.linalg.svd(H)\r\n\r\n V = numpy.transpose(V)\r\n\r\n R_1_1 = (U[0][0] * V[0][0]) +((U[0][1] * V[1][0]))\r\n R_1_2 = (U[0][0] * V[0][1]) +((U[0][1] * V[1][1]))\r\n R_2_1 = (U[1][0] * V[0][0]) +((U[1][1] * V[1][0]))\r\n R_2_2 = (U[1][0] * V[0][1]) +((U[1][1] * V[1][1]))\r\n\r\n R = [[R_1_1,R_1_2],[R_2_1,R_2_2]]\r\n\r\n return R", "def compute_subspace_angles(S1, S2):\n # Check the if the input arrays are 1D or 2D\n if S1.ndim == 1:\n # mat1 = np.reshape(S1, (1,S1.size))\n mat1 = np.reshape(S1, (S1.size, 1))\n elif S1.ndim == 2:\n mat1 = S1\n else:\n raise ValueError('The function is intended only to handle 1D and 2D numpy arrays')\n if S2.ndim == 1:\n # mat2 = np.reshape(S2, (1,S2.size))\n mat2 = np.reshape(S2, (S2.size, 1))\n elif S2.ndim == 2:\n mat2 = S2\n else:\n raise ValueError('The function is intended only to handle 1D and 2D numpy arrays')\n\n\n # Do a QR Factorization of S1 and S2\n Q1, R1 = np.linalg.qr(mat1)\n # print('S1 = \\n', S1)\n # print('Q1 = \\n', Q1)\n Q2, R2 = np.linalg.qr(mat2)\n # print('S1 = \\n', S2)\n # print('Q2 = \\n', Q2)\n intmat = np.matmul(Q1.T, Q2)\n # print('intmat = \\n', intmat)\n Y, s, Z = np.linalg.svd(intmat)\n # print('Y = \\n', Y)\n # print('U = \\n', np.matmul(Q1, Y))\n # print('V = \\n', np.matmul(Q2, Y))\n # print('s = \\n', s)\n\n # NaN prevention check\n indices = np.where(s > 1) # Get the indices where the violation exisits\n for entry in indices: # Loop over these indices to fix the violation\n for i in entry:\n if s[i] - 1 < 1.e-13: # This violation limit is pulled out of thin air!\n s[i] = 1.0\n\n s_radians = np.arccos(s)\n\n return s_radians", "def find_best_rotation(q1, q2, allow_reflection = False, only_xy = False):\n if q1.ndim != 2 or q2.ndim != 2:\n raise Exception(\"This only supports curves of shape (N,M) for N dimensions and M samples\")\n\n n = q1.shape[0]\n\n # if only_xy, strip everything but the x and y coordinates of q1 and q2\n if only_xy:\n _q1 = q1[0:2, :]\n _q2 = q2[0:2, :]\n else:\n _q1 = q1\n _q2 = q2\n\n _n = _q1.shape[0]\n A = _q1@_q2.T\n U, s, Vh = svd(A)\n S = eye(_n)\n\n # if reflections are not allowed and the determinant of A is negative,\n # then the entry corresponding to the smallest singular value is negated\n # as in the Kabsch algorithm\n if det(A) < 0 and not allow_reflection:\n S[-1, -1] = -1 # the last entry of the matrix becomes -1\n\n _R = U@S@Vh # optimal\n \n # if only_xy, the top left block of the matrix is _R and the rest is identity matrix\n if only_xy:\n R = eye(n)\n R[0:2, 0:2] = _R\n else:\n R = _R\n \n q2new = R@q2\n\n return (q2new, R)", "def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn", "def find_rotation(a, b):\n a.shape = (3,)\n b.shape = (3,)\n\n a /= 
np.linalg.norm(a)\n b /= np.linalg.norm(b)\n \n v = np.cross(a, b)\n \n angle_AB = -1*vector_angle(a, b) \n \n print(angle_AB)\n s = np.linalg.norm(v) * np.sin(angle_AB)\n \n c = np.dot(a, b) * np.cos(angle_AB)\n \n # Rotation matrix, R = I + Vx + Vx^2 * (1-c)/s^2\n I = np.identity(3)\n Vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n \n R = I + Vx + np.linalg.matrix_power(Vx, 2) / (1+c)\n return R", "def rotation(x1, z1, x2, z2):\n e1 = np.zeros(shape=(3, 3))\n e2 = np.zeros(shape=(3, 3))\n e1[0, :] = x1 / np.linalg.norm(x1)\n e1[2, :] = z1 / np.linalg.norm(z1)\n e1[1, :] = np.cross(e1[2, :], e1[0, :])\n e2[0, :] = x2 / np.linalg.norm(x2)\n e2[2, :] = z2 / np.linalg.norm(z2)\n e2[1, :] = np.cross(e2[2, :], e2[0, :])\n R = np.zeros(shape=(3, 3))\n for i in range(3):\n for j in range(3):\n R[i, j] = np.dot(e1[i, :], e2[j, :])\n R = np.transpose(R)\n return R", "def find_rotation(a, b):\n if not np:\n raise PysimmError('pysimm.calc.find_rotation function requires numpy')\n a = np.array(a)\n b = np.array(b)\n\n a_x_b = np.cross(a, b)\n axis = a_x_b / np.linalg.norm(a_x_b)\n theta = acos(np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b))\n\n skew = np.matrix([[0, -axis[2], axis[1]],\n [axis[2], 0, -axis[0]],\n [-axis[1], axis[0], 0]])\n\n rot_matrix = np.identity(3) + sin(theta) * skew + (1 - cos(theta)) * skew * skew\n return rot_matrix", "def orient(ps, origin, v1, v2):\r\n \r\n ps = np.vstack((v1, v2, ps))\r\n ps -= origin\r\n if ps[0][1] == 0:\r\n a = 0\r\n else:\r\n a = np.arcsin(np.fabs(ps[0][1]) / np.sqrt(ps[0][1] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][1] < 0 <= ps[0][2]) or (ps[0][1] > 0 > ps[0][2]):\r\n a = 2 * np.pi - a\r\n if (ps[0][1] * np.sin(a) + ps[0][2] * np.cos(a)) < 0:\r\n a = np.pi + a \r\n ps = rotate(a, ps, 0)\r\n if ps[0][0] == 0:\r\n b = 0\r\n else:\r\n b = np.arcsin(np.fabs(ps[0][0]) / np.sqrt(ps[0][0] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][0] < 0 and ps[0][2] < 0) or (ps[0][0] > 0 and ps[0][2] > 0):\r\n b = 2 * np.pi - b\r\n if (ps[0][2] * np.cos(b) - ps[0][0] * np.sin(b)) < 0:\r\n b = np.pi + b\r\n ps = rotate(b, ps, 1)\r\n if ps[1][1] == 0:\r\n c = 0\r\n else:\r\n c = np.arcsin(np.fabs(ps[1][1]) / np.sqrt(ps[1][0]**2 + ps[1][1]**2))\r\n if (ps[1][0] < 0 and ps[1][1] < 0) or (ps[1][0] > 0 and ps[1][1] > 0):\r\n c = 2 * np.pi - c\r\n if (ps[1][0] * np.cos(c) - ps[1][1] * np.sin(c)) < 0:\r\n c = np.pi + c\r\n ps = rotate(c, ps, 2)\r\n return ps[2:]", "def test_rotation_isometry(self):\n import numpy\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n \n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n # 1/sqrt(2)\n s2_ref = 0.707106781186547524400844362104785\n\n o = s.make_origin(2)\n p = s.make_point((1, 0), magic)\n q = s.make_point((s2_ref, s2_ref), magic)\n\n rot = space_point_transform(\n numpy.array([[1,0,0],[0,s2_ref,-s2_ref],[0,s2_ref,s2_ref]]),\n curvature=k,\n math = common_math\n )\n\n f, g, i = map(space_point_transform, (p, q, o))\n\n def check_transform_eq(t1, t2, invert=False):\n for ref in (\n s.make_point((5/13, 12/13), magic),\n s.make_point((-3/5, 4/5), magic)\n ):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref),\n abs_tol = 1e-12\n ))\n\n # 1/8 turn, times 8\n check_transform_eq(rot*8, i)\n\n # rotate, shift, rotate\n check_transform_eq(g, rot + f + rot * -1)\n\n # the other way\n check_transform_eq(f, rot * -1 + g + rot)", "def estimate_rigid_transform(points1, points2, translation_only=False):\n 
centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2,:2] = rotation\n H[:2, 2] = translation\n return H", "def test_from_two_vectors(self):\r\n for _ in range(20):\r\n v0 = np.random.randn(3)\r\n v1 = np.random.randn(3)\r\n v0 /= np.linalg.norm(v0)\r\n v1 /= np.linalg.norm(v1)\r\n\r\n q = from_two_vectors(v0, v1)\r\n R = to_rotation(q)\r\n\r\n zero_vec = R @ v0 - v1\r\n self.assertAlmostEqual(np.linalg.norm(zero_vec), 0.0)\r\n\r\n q_inv = from_two_vectors(v1, v0)\r\n R_inv = to_rotation(q_inv)\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def rotation_from_sphere_points_torch(x, y):\n if x.dim() == 1:\n x = x.unsqueeze(-2)\n if y.dim() == 1:\n y = y.unsqueeze(-2)\n\n dim = x.shape[1]\n\n # Compute the inner product\n inner_product = torch.mm(x, y.T)\n # Clamp in case any value is not in the interval [-1,1]\n # A small number is added/substracted to the bounds to avoid NaNs during backward computation.\n inner_product = inner_product.clamp(-1. + 1e-15, 1. - 1e-15)\n\n # Compute intermediate vector\n c_vec = x - y * inner_product\n c_vec = c_vec / torch.norm(c_vec)\n\n R = torch.eye(dim, dim, dtype=inner_product.dtype) + \\\n torch.sin(torch.acos(inner_product)) * (torch.mm(y.T, c_vec) - torch.mm(c_vec.T, y)) + \\\n (inner_product - 1.) * (torch.mm(y.T, y) + torch.mm(c_vec.T, c_vec))\n\n return R", "def rotation_between_anglesets(agls1, agls2):\n\tfrom math import sin, cos, pi, sqrt, atan2, acos, atan\n\tfrom numpy import array, linalg, matrix\n\timport types\n\n\tdeg2rad = pi/180.0\n\n\tdef ori2xyz(ori):\n\t\tif(type(ori) == types.ListType):\n\t\t\tphi, theta, psi = ori[:3]\n\t\telse:\n\t\t\t# it has to be Transformation object\n\t\t\td = ori.get_params(\"spider\")\n\t\t\tphi = d[\"phi\"]\n\t\t\ttheta = d[\"theta\"]\n\t\t\tpsi = d[\"psi\"]\n\t\t\"\"\"\n\t\t# This makes no sense here! 
PAP 09/2011\n\t\tif theta > 90.0:\n\t\t\tphi += 180.0\n\t\t\ttheta = 180.0-theta\n\t\t\"\"\"\n\t\tphi *= deg2rad\n\t\ttheta *= deg2rad\n\t\tx = sin(theta) * sin(phi)\n\t\ty = sin(theta) * cos(phi)\n\t\tz = cos(theta)\n\n\t\treturn [x, y, z]\n\n\tN = len(agls1)\n\tif N != len(agls2):\n\t\tprint 'Both lists must have the same length'\n\t\treturn -1\n\tif N < 2:\n\t\tprint 'At least two orientations are required in each list'\n\t\treturn -1\n\tU1, U2 = [], []\n\tfor n in xrange(N):\n\t\tp1 = ori2xyz(agls1[n])\n\t\tp2 = ori2xyz(agls2[n])\n\t\tU1.append(p1)\n\t\tU2.append(p2)\n\n\t# compute all Suv with uv = {xx, xy, xz, yx, ..., zz}\n\tSuv = [0] * 9\n\tc = 0\n\tnbori = len(U1)\n\tfor i in xrange(3):\n\t\tfor j in xrange(3):\n\t\t\tfor s in xrange(nbori):\n\t\t\t\tSuv[c] += (U2[s][i] * U1[s][j])\n\t\t\tc += 1\n\n # create matrix N\n\tN = array([[Suv[0]+Suv[4]+Suv[8], Suv[5]-Suv[7], Suv[6]-Suv[2], Suv[1]-Suv[3]], \n\t\t [Suv[5]-Suv[7], Suv[0]-Suv[4]-Suv[8], Suv[1]+Suv[3], Suv[6]+Suv[2]], \n\t\t [Suv[6]-Suv[2], Suv[1]+Suv[3], -Suv[0]+Suv[4]-Suv[8], Suv[5]+Suv[7]],\n\t\t [Suv[1]-Suv[3], Suv[6]+Suv[2], Suv[5]+Suv[7], -Suv[0]-Suv[4]+Suv[8]]])\n\n # eigenvector corresponding to the most positive eigenvalue\n\tval, vec = linalg.eig(N)\n\tq0, qx, qy, qz = vec[:, val.argmax()]\n\n # create quaternion Rot matrix \n\tr = [q0*q0-qx*qx+qy*qy-qz*qz, 2*(qy*qx+q0*qz), 2*(qy*qz-q0*qx), 0.0,\n\t 2*(qx*qy-q0*qz), q0*q0+qx*qx-qy*qy-qz*qz, 2*(qx*qz+q0*qy), 0.0,\n\t 2*(qz*qy+q0*qx), 2*(qz*qx-q0*qy), q0*q0-qx*qx-qy*qy+qz*qz, 0.0]\n\t\n\tR = Transform(r)\n\tdictR = R.get_rotation('SPIDER')\n\n\treturn dictR['phi'], dictR['theta'], dictR['psi']", "def transformation_from_points(points1, points2):\n points1 = points1.astype(np.float64)\n points2 = points2.astype(np.float64)\n\n c1 = np.mean(points1, axis=0)\n c2 = np.mean(points2, axis=0)\n points1 -= c1\n points2 -= c2\n\n s1 = np.std(points1)\n s2 = np.std(points2)\n points1 /= s1\n points2 /= s2\n\n u, _, vt = np.linalg.svd(np.matmul(points1.T, points2))\n r = np.matmul(u, vt).T\n\n return np.hstack(((s2 / s1) * r, (c2.T - (s2 / s1) * np.matmul(r, c1.T)).reshape(2, -1)))", "def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def find_rotation_and_seed_unique(q1, q2, closed=0, lam=0.0, rotation=True, method=\"DP\"):\n\n n, T = q1.shape\n\n scl = 4.\n minE = 1000\n if closed == 1:\n end_idx = int(floor(T/scl))\n scl = 4\n else:\n end_idx = 0\n \n for ctr in range(0, end_idx+1):\n if closed == 1:\n q2n = shift_f(q2, scl*ctr)\n else:\n q2n = q2.copy()\n \n if rotation:\n q2new, R = find_best_rotation(q1, q2n)\n else:\n q2new = q2n\n R = eye(n)\n\n # Reparam\n if norm(q1-q2new,'fro') > 0.0001:\n gam = optimum_reparam_curve(q2new, q1, lam, method)\n gamI = uf.invertGamma(gam)\n p2n = q_to_curve(q2n)\n p2n = group_action_by_gamma_coord(p2n,gamI)\n q2new = curve_to_q(p2n)[0]\n if closed == 1:\n q2new = project_curve(q2new)\n else:\n gamI = linspace(0,1,T)\n \n tmp = innerprod_q2(q1,q2new)\n if tmp > 1:\n tmp = 1\n if tmp < -1:\n tmp = -1\n Ec = arccos(tmp)\n if Ec < minE:\n Rbest = R\n q2best = q2new\n gamIbest = gamI\n minE = Ec\n\n return (q2best, Rbest, gamIbest)", "def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T 
@ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2, :2] = rotation\n H[:2, 2] = translation\n return H", "def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2, :2] = rotation\n H[:2, 2] = translation\n return H", "def icp_step(Points1,Points2):\r\n #get the correspondences\r\n S1,S2 = get_correspondences(Points1,Points2)\r\n\r\n # Center the resulting pairs substracting their means\r\n S1_shift, mean1 = subtract_mean(S1)\r\n S2_shift, mean2 = subtract_mean(S2)\r\n\r\n #calculate the error-minimizing rotation\r\n R = compute_error_minimizing_rotation(S1_shift,S2_shift)\r\n #find the t such that R*p+t = R*(p-mean2)+mean1\r\n Rmean2 = [R[0][0]*mean2[0]+R[0][1]*mean2[1],\r\n R[1][0]*mean2[0]+R[1][1]*mean2[1]]\r\n\r\n return R,[-(mean1[0]-Rmean2[0]),-(mean1[1]-Rmean2[1])]", "def rotacija_pravouglog_trougla_oko_hipotenuze(s2, s1):\r\n c = math.sqrt(s2 * s2 + s1 * s1)\r\n povrsina_trougla= (s2 * s1) / 2\r\n hc = (2 * povrsina_trougla) / c\r\n H1 = math.sqrt(s1 * s1 - hc * hc)\r\n H2 = math.sqrt(s2 * s2 - hc * hc)\r\n pi= 3.14\r\n povrsina = hc * pi * (s1 + s2)\r\n zapremina = (hc * hc * pi * (H1 + H2)) / 3\r\n return povrsina, zapremina", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def Misorien2FZ1(m1,m2,symtype='Cubic'):\n m2=np.matrix(m2)\n ops=GetSymRotMat(symtype)\n angle=6.3\n for op in ops:\n tmp=m1.dot(op.dot(m2.T))\n cosangle=0.5*(tmp.trace()-1)\n cosangle=min(0.9999999999,cosangle)\n cosangle=max(-0.99999999999,cosangle)\n newangle=np.arccos(cosangle)\n if newangle<angle:\n angle=newangle\n oRes=tmp\n return oRes,angle", "def matrix_discrepancy(centers1, rotations1, centers2, rotations2,\n angle_weight=None, center_weight=None):\n\n n = len(centers1)\n\n assert len(centers2) == n\n assert len(rotations1) == n\n assert len(rotations2) == n\n assert n >= 2\n\n if not angle_weight:\n angle_weight = 1.0\n\n if not center_weight:\n center_weight = [1.0] * n\n\n if n > 2:\n rotation_matrix, new1, mean1, RMSD, sse = \\\n besttransformation_weighted(centers1, centers2, center_weight)\n\n orientation_error = 0\n angles = []\n for r1, r2 in zip(rotations1, rotations2):\n if r1.shape[0] > 0 and r2.shape[0] > 0:\n angle = 
angle_of_rotation(np.dot(np.dot(rotation_matrix, r2),\n np.transpose(r1)))\n orientation_error += np.square(angle)\n discrepancy = np.sqrt(sse + angle_weight * orientation_error) / n\n\n else:\n\n R1 = np.dot(np.transpose(rotations1[1]),rotations1[0]) # rotation from nt 0 to nt1 of 1st motif\n R2 = np.dot(np.transpose(rotations2[0]),rotations2[1]) # rotation from nt 0 to nt1 of 2nd motif\n\n rot1 = np.dot(R1,R2)\n ang1 = angle_of_rotation(rot1)\n\n rot2 = np.dot(np.transpose(R1),np.transpose(R2))\n ang2 = angle_of_rotation(rot2)\n\n T1 = np.dot(centers1[1] - centers1[0],rotations1[0])\n T2 = np.dot(centers1[0] - centers1[1],rotations1[1])\n\n S1 = np.dot(centers2[1] - centers2[0],rotations2[0])\n S2 = np.dot(centers2[0] - centers2[1],rotations2[1])\n\n D1 = T1-S1\n D2 = T2-S2\n\n discrepancy = np.sqrt(D1[0]**2 + D1[1]**2 + D1[2]**2 + (angle_weight*ang1)**2)\n discrepancy += np.sqrt(D2[0]**2 + D2[1]**2 + D2[2]**2 + (angle_weight*ang2)**2)\n\n# factor = 1/(4*np.sqrt(2)) # factor to multiply by discrepancy; faster to precompute?\n\n discrepancy = discrepancy * 0.17677669529663687\n\n return discrepancy", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = 
gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def next_rotation(q_1: Q, q_2: Q) -> Q:\n q_1.check_representations(q_2)\n\n if not math.isclose(q_1.t, q_2.t):\n raise ValueError(f\"Oops, to be a rotation, the first values must be the same: {q_1.t} != {q_2.t}\")\n\n if not math.isclose(norm_squared(q_1).t, norm_squared(q_2).t):\n raise ValueError(f\"Oops, the norm squared of these two are not equal: {norm_squared(q_1).t} != {norm_squared(q_2).t}\")\n\n next_rot = product(q_1, q_2)\n v_abs_q_1 = abs_of_vector(q_1).t\n next_vector_normalized = normalize(vector_q(next_rot), v_abs_q_1)\n next_vector_normalized.t = q_1.t\n\n return next_vector_normalized", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def compute_homography(pts1, pts2):\n\n #\n # Your code here\n #\n p1 = np.c_[pts1, np.ones(len(pts1))]\n p2 = np.c_[pts2, np.ones(len(pts2))]\n \n A = np.zeros((2 * p1.shape[0], 9))\n\n for i in range(0, 2 * p1.shape[0], 2):\n\n z = p2[i // 2]\n z_ = p1[i // 2]\n\n A[i][:3] = z_\n A[i + 1][3:6] = z_\n A[i][6:] = -z_ * z[0]\n A[i + 1][6:] = -z_ * z[1]\n \n _, _, Vh = np.linalg.svd(A.T.dot(A))\n V = Vh.T\n H = np.reshape(V[:, -1], (3, 3))\n\n if np.linalg.norm(H) != 1.:\n H /= np.linalg.norm(H)\n \n return H", "def rotation_matrix_from_normals(v0, v1, tol=1e-20):\n\n v0 = mkvc(v0)\n v1 = mkvc(v1)\n\n # ensure both n0, n1 are vectors of length 1\n assert len(v0) == 3, \"Length of n0 should be 3\"\n assert len(v1) == 3, \"Length of n1 should be 3\"\n\n # ensure both are true normals\n n0 = v0*1./np.linalg.norm(v0)\n n1 = v1*1./np.linalg.norm(v1)\n\n n0dotn1 = n0.dot(n1)\n\n # define the rotation axis, which is the cross product of the two vectors\n rotAx = np.cross(n0, n1)\n\n if np.linalg.norm(rotAx) < tol:\n return np.eye(3, dtype=float)\n\n rotAx *= 1./np.linalg.norm(rotAx)\n\n cosT = n0dotn1/(np.linalg.norm(n0)*np.linalg.norm(n1))\n sinT = np.sqrt(1.-n0dotn1**2)\n\n ux = np.array(\n [\n [0., -rotAx[2], rotAx[1]],\n [rotAx[2], 0., -rotAx[0]],\n [-rotAx[1], rotAx[0], 0.]\n ], dtype=float\n )\n\n return np.eye(3, dtype=float) + sinT*ux + (1.-cosT)*(ux.dot(ux))", "def rot_align(m, coeff, pairs):\n n_theta = 360\n p = pairs.shape[0]\n c = np.zeros((m + 1, p), dtype='complex128')\n m_list = np.arange(1, m + 1)\n\n max_iter = 100\n precision = 1e-10\n\n # Find initial points for Newton Raphson\n for i in range(m + 1):\n c[i] = np.einsum('ij, ij -> j', np.conj(coeff[i][:, pairs[:, 0]]), coeff[i][:, pairs[:, 1]])\n\n c2 = np.flipud(np.conj(c[1:]))\n b = (2 * m + 1) * np.real(common.icfft(np.concatenate((c2, c), axis=0)))\n rot = np.argmax(b, axis=0)\n rot = (rot - m) * n_theta / (2 * m + 1)\n\n # creating f' and f'' function\n m_list_ang_1j = 1j * m_list * np.pi / 180\n c_for_f_prime_1 = m_list_ang_1j * c[1:].T\n c_for_f_prime_2 = np.square(m_list_ang_1j) * 
c[1:].T\n\n def f_prime(x):\n return np.sum(np.real(c_for_f_prime_1 * np.exp(np.outer(x, m_list_ang_1j))), 1)\n\n def f_prime2(x):\n return np.sum(np.real(c_for_f_prime_2 * np.exp(np.outer(x, m_list_ang_1j))), 1)\n\n # Finding brackets, x1<x2 such that sign(f(x1)) != sign(f(x2)) and rot = (x1 + x2) / 2\n step_size = 0.5\n x1 = rot.copy()\n x2 = rot.copy()\n bad_indices = np.full(p, True)\n while np.any(bad_indices):\n x1[bad_indices] -= step_size\n x2[bad_indices] += step_size\n f_x1 = f_prime(x1)\n f_x2 = f_prime(x2)\n bad_indices = f_x1 * f_x2 > 0\n\n # Setting x1, x2 into x_low, x_high such that f(x_low)<f(x_high).\n x_low = x1.copy()\n x_high = x2.copy()\n f_x_low = f_prime(x_low)\n f_x_high = f_prime(x_high)\n x_high_is_low = f_x_high < f_x_low\n tmp = x_low.copy()\n tmp[x_high_is_low] = x_high[x_high_is_low]\n x_high[x_high_is_low] = x_low[x_high_is_low]\n x_low = tmp\n\n # Handling f(x) = 0 case\n f_x_low = f_prime(x_low)\n f_x_low_0 = f_x_low == 0\n x_high[f_x_low_0] = x_low[f_x_low_0]\n f_x_high = f_prime(x_high)\n f_x_high_0 = f_x_high == 0\n x_low[f_x_high_0] = x_high[f_x_high_0]\n\n rts = (x_low + x_high) / 2\n dx = np.abs(x_low - x_high)\n dx_old = dx.copy()\n f = f_prime(rts)\n df = f_prime2(rts)\n for _ in range(max_iter):\n bisect_indices = np.bitwise_or(((rts - x_high) * df - f) * ((rts - x_low) * df - f) > 0,\n np.abs(2 * f) > np.abs(dx_old * df))\n newton_indices = ~bisect_indices\n dx_old = dx.copy()\n\n # Handling out of range indices with Bisect step\n dx[bisect_indices] = (x_high[bisect_indices] - x_low[bisect_indices]) / 2\n rts[bisect_indices] = x_low[bisect_indices] + dx[bisect_indices]\n\n # Handling the rest with newton step\n dx[newton_indices] = f[newton_indices] / df[newton_indices]\n rts[newton_indices] -= dx[newton_indices]\n\n # Stop criteria\n if np.all(np.abs(dx) < precision):\n break\n\n # Else update parameters\n f = f_prime(rts)\n df = f_prime2(rts)\n f_negative = f < 0\n x_low[f_negative] = rts[f_negative]\n x_high[~f_negative] = rts[~f_negative]\n\n # Changing low and high of converged points\n converged = np.abs(dx) < precision\n x_low[converged] = rts[converged]\n x_high[converged] = rts[converged]\n\n print(np.sum(np.abs(dx) < precision))\n\n rot = rts\n m_list = np.arange(m + 1)\n m_list_ang = m_list * np.pi / 180\n c *= np.exp(1j * np.outer(m_list_ang, rot))\n corr = (np.real(c[0]) + 2 * np.sum(np.real(c[1:]), axis=0)) / 2\n\n return corr, rot" ]
[ "0.6994456", "0.69056857", "0.6586826", "0.6516587", "0.6442773", "0.6389191", "0.63765293", "0.62826663", "0.62586", "0.6233807", "0.619714", "0.61885524", "0.61745304", "0.6143739", "0.6107027", "0.61043227", "0.61020106", "0.608416", "0.608416", "0.6079441", "0.6075789", "0.6073118", "0.60476243", "0.6038673", "0.6036546", "0.60357565", "0.6004052", "0.5965147", "0.5957331", "0.59384346" ]
0.7534391
0
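The record above stores a Kabsch-style SVD solution for rotating point set m1 onto point set m2 with a translation. A compact self-contained sketch of that standard algorithm (illustrative function name, assuming only NumPy and one point per row) is:

import numpy as np

def kabsch_align(m1, m2):
    # Least-squares rigid alignment (Kabsch): find R, t so that
    # R @ p + t best matches q for corresponding rows p of m1 and q of m2.
    m1 = np.asarray(m1, dtype=float)
    m2 = np.asarray(m2, dtype=float)
    c1 = m1.mean(axis=0)
    c2 = m2.mean(axis=0)
    H = (m1 - c1).T @ (m2 - c2)              # cross-covariance matrix
    U, _, Vt = np.linalg.svd(H)
    d = np.sign(np.linalg.det(Vt.T @ U.T))   # -1 means a reflection crept in
    D = np.diag([1.0] * (H.shape[0] - 1) + [d])
    R = Vt.T @ D @ U.T                       # proper rotation, det(R) = +1
    t = c2 - R @ c1
    return R, t

Applying R @ m1[i] + t maps each point of m1 onto its counterpart in m2 in the least-squares sense; the diagonal correction D plays the same role as the middle matrix in the stored snippet, forcing a rotation rather than a reflection.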
Generate some polynomial data
def generate_polynomial(): degree = numpy.random.choice(range(3, 7)) x = numpy.linspace(-10, 10, 1000) coefficients = numpy.random.chisquare(3, size=degree) + 1 coefficients *= numpy.random.choice([-1, 1], size=coefficients.shape) coefficients *= 0.5 y = numpy.polyval(coefficients, x) add_noise(y, 0.1) return x, y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_polynomial_features(self, X) :\n\n n,d = X.shape\n\n ### ========== TODO : START ========== ###\n # part b: modify to create matrix for simple linear model\n # part g: modify to create matrix for polynomial model\n Phi = X\n m = self.m_\n\n if m == 1:\n Phi = np.zeros((n,2))\n for i in range(n):\n Phi[i,0] = 1\n Phi[i, 1] = X[i]\n\n else:\n Phi = np.ones((n,m+1))#n*m+1 dimmension\n power_arr = np.arange(0, m+1)\n for index, row in enumerate(Phi):# get every row\n row = np.repeat(X[index],m+1)\n row = np.power(row,power_arr)\n Phi [index,] = row\n #also could use the following\n \"\"\"\n import sklearn.preprocessing as sk\n #X is a N*1 vector\n poly_mat = sk.PolynomialFeatures(3)\n poly.fit_transform(a)\n \"\"\"\n\n\n\n\n\n ### ========== TODO : END ========== ###\n\n return Phi", "def construct_poly(data, power):\n return np.power(data, power)", "def toyData(w,sigma,N): \n #Degree of polynomial \n degree=w.size; \n \n #generate x values \n x=np.linspace(0, 1,N);\n \n poly=preprocessing.PolynomialFeatures(degree-1,include_bias=True)\n \n PHI=poly.fit_transform(x.reshape(N,1)) \n \n y=np.dot(PHI,w);\n \n target=y+np.random.normal(0, sigma, N);\n \n Out=[x,y,PHI, target]\n\n return Out", "def polyFeat(X, p):\r\n # You need to return the following variables correctly.\r\n X_poly = np.zeros((X.shape[0], p))\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in range(p):\r\n X_poly[:, i] = X[:, 0] ** (i + 1)\r\n\r\n # ============================================================\r\n return X_poly", "def build_poly(x, degree): \n # ***************************************************\n # COPY YOUR CODE FROM EX03 HERE\n # polynomial basis function: TODO\n # this function should return the matrix formed\n # by applying the polynomial basis to the input data\n # ***************************************************\n raise NotImplementedError", "def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p", "def genpoly(sum_count=10, deg=5, cof=10, min_count=1):\n\n p = Polynome([0], '')\n d_prev = -1\n while p.length < min_count:\n p.reset()\n for j in range(sum_count):\n d = randrange(deg)\n c = randrange(-cof, cof)\n while d == d_prev and c != 0:\n d = randrange(deg)\n c = randrange(-cof, cof)\n d_prev = d\n p.plus(c, d)\n return p", "def poly(x, y, pd) :\n # Maximum polynomial degree allowed is 7.\n maxD = 7\n if pd > maxD :\n exit(\"Please choose a reasonable polynomial degree (0 <= pd <= \" + maxD + \").\")\n \n # Make the polynomial matrix one degree at a time.\n p = np.zeros((len(x), int((pd+1)*(pd+2)/2)), float)\n count = 0\n numP = 0\n for i in range(pd + 1) :\n for j in range(numP + 1) :\n if (j == 0) and (numP == 0) :\n p[:,count] 
= 1\n elif (j == 0) :\n p[:,count] = x**(numP-j)\n elif (numP-j == 0) :\n p[:,count] = y**j\n else :\n p[:,count] = x**(numP-j) * y**j\n count += 1\n numP += 1\n \n return p", "def build_poly(x, degree):\n phi = np.ones(len(x))\n phi = np.vstack((phi, [x**(j+1) for j in range(degree)]))\n \n return phi.T", "def polygen(count=10, sum_count=10, deg=5, cof=10):\n\n s = enumi_beg\n ans = enumi_beg\n\n for i in range(count):\n s += item_beg\n ans += item_beg\n p = genpoly(sum_count, deg, cof)\n ans += p.print_out()\n s += p.rep + item_end\n ans += item_end\n s += enumi_end\n ans += enumi_end\n return s, ans", "def build_poly(x, degree):\n tx = np.zeros((x.shape[0], x.shape[1]*(degree+1)))\n \n for j in range(degree+1):\n tx[:,x.shape[1]*j:x.shape[1]*(j+1)] = np.power(x,j)\n \n return tx", "def generate_random_tropical_poly(max_degree, min_coefficient, max_coefficient):\n coefficients = []\n for d in range(0, random.randint(1, max_degree) + 1):\n coefficients.append(random.randint(min_coefficient, max_coefficient))\n return coefficients", "def _generate_poly_array(self, nchan, coeff=[]):\n if nchan < 0:\n raise ValueError, \"nchan should be >=0\"\n if len(coeff)==0:\n if nchan ==0: return []\n else: raise ValueError, \"No valid coefficient given.\"\n polyarr = numpy.zeros(nchan)\n for iorder in range(len(coeff)):\n polyarr += coeff[iorder]*numpy.array(xrange(nchan))**iorder\n return polyarr", "def add_polynomial_features(x, power):\n if type(power) is int and type(x) is np.ndarray:\n return np.concatenate([x**i for i in range(1, power+1)], axis=1)\n return None", "def get_poly(kwargs):\n from sklearn.preprocessing import PolynomialFeatures\n return PolynomialFeatures(**kwargs)", "def generate_polynomial_examples(number_of_examples, number_of_observations):\n a2, a3, a4 = generate_double_a2_a3_a4_coefficients(number_of_examples)\n examples = generate_examples_from_coefficients(a2, a3, a4, number_of_observations)\n examples += np.random.normal(0, 0.1, examples.shape)\n labels = np.squeeze(a3[:, 0], axis=-1)\n return examples, labels", "def polynomial_creator(*coefficients):\n def polynomial(x):\n res = 0\n for index, coeff in enumerate(coefficients):\n res += coeff * x** index\n return res\n return polynomial", "def _create_ploynomial_array(self, coeff, x):\n xarr = numpy.array(x)\n yarr = numpy.zeros(len(xarr))\n for idim in range(len(coeff)):\n ai = coeff[idim]\n yarr += ai*xarr**idim\n return yarr", "def generate_poly(hyper, params):\n\n k, d = hyper['k'], hyper['d']\n #atoms = { \n # (h,) : symbols('h_%d'%h)\n # for h in xrange(1, k+1)\n # }\n #atoms[(k,)] = 1. - sum( symbols('h_%d'%h) for h in xrange(1, k) )\n\n atoms = {}\n for h in xrange(1,k+1):\n atoms.update({ \n (h,x1) : symbols('x_%d%d'%(h,x1))\n for x1 in xrange(1,d+1)\n })\n #atoms[(h,d)] = 1. 
- sum(symbols('x_%d%d'%(h,x1)) for x1 in xrange(1,d))\n\n m = {}\n for x1 in xrange(1,d+1):\n m[(x1,)] = poly( sum( atoms[(h,x1)] for h in xrange(1,k+1) ) )\n for x2 in xrange(1,d+1):\n m[(x1,x2)] = poly( sum( atoms[(h,x1)] * atoms[(h,x2)] for h in xrange(1,k+1) ) )\n for x3 in xrange(1,d+1):\n m[(x1,x2,x3)] = poly( sum( atoms[(h,x1)] * atoms[(h,x2)] * atoms[(h,x3)] for h in xrange(1,k+1) ) )\n\n return m", "def get_data_poly_noise(start, stop, noise_rel=0.1, num=50, order=1):\n \n x = (stop - start) * np.random.random_sample(size=num) + start \n #coefficients for the polynomial in [-5,5]\n poly_coeff = 10 * np.random.random_sample(size=order+1) - 5\n \n #create polynomial\n y = np.zeros(x.shape)\n for i in range(order+1):\n y += poly_coeff[i] * x**i\n \n noise_mag = noise_rel * np.abs((np.max(y) - np.min(y)))\n #add noise in [-noise_mag/2, noise_mag/2]\n y += noise_mag * np.random.random_sample(size=num) - noise_mag/2\n \n return (x, y)", "def base_polynome(numbers):\n\n monomes = [ x**n for n in numbers ]\n polynome = sum(monomes)\n\n return poly(polynome, x)", "def _poly_func(x, a, b, c, d, e):\n return a * x ** 6 + b * x ** 5 + c * x ** 4 + d * x ** 3 + e * x ** 2", "def gen_rand_poly(deg_lower_limit = 1, deg_upper_limit = 10, coeff_limit = 10):\n deg = random.randint(deg_lower_limit,deg_upper_limit)\n coeffs = [random.randint(-coeff_limit, coeff_limit) for _ in range(deg+1)]\n\n # Never have 0 as leading coefficient\n if coeffs[deg] == 0:\n coeffs[deg] = 1\n\n def term(coeff, d):\n if coeff == 0:\n return ''\n elif d == 0:\n return (' + ' if coeff>0 else ' - ') + str(abs(coeff))\n elif d == 1:\n return (' + ' if coeff>0 else ' - ') + (f'{abs(coeff)}x' if abs(coeff)!=1 else 'x')\n elif d == deg:\n return ('' if coeff>0 else '-') + (f'{abs(coeff)}x^{d}' if abs(coeff)!=1 else f'x^{d}')\n else:\n return (' + ' if coeff>0 else ' - ') + (f'{abs(coeff)}x^{d}' if abs(coeff)!=1 else f'x^{d}')\n\n terms = [term(coeffs[d], d) for d in range(deg+1)]\n return deg, coeffs, ''.join([terms[d]for d in range(deg,-1,-1)]).strip('+ ')", "def polynomial(a, x):\n\n sum = 0\n\n for i in range(len(a)):\n sum += a[i] * x**i\n return sum", "def random_polynomial(self, degree: int) -> Polynomial:\n p = self.polynomial(*[self.random_element() for _ in range(0, degree)])\n p += p.monic(degree)\n return p", "def generate_coefficients_data(poly_degree: int, performance_data: pd.DataFrame, param_columns: typing.List) -> pd.DataFrame:\n if poly_degree != 2:\n logging.warning('Not Implemented: polynomial degree of > 2. 
Will use degree 2 for meta-model')\n coef_names = get_coefficient_names()\n results = []\n for idx, task_id in enumerate(performance_data['task_id'].unique()):\n frame_task = performance_data.loc[performance_data['task_id'] == task_id]\n model = sklearn.linear_model.LinearRegression(fit_intercept=False)\n poly_feat = sklearn.preprocessing.PolynomialFeatures(2)\n X = poly_feat.fit_transform(frame_task[param_columns])\n y = frame_task['predictive_accuracy']\n model.fit(X, y)\n result = {\n 'task_id': task_id,\n coef_names[0]: model.coef_[0],\n coef_names[1]: model.coef_[1],\n coef_names[2]: model.coef_[2],\n coef_names[3]: model.coef_[3],\n coef_names[4]: model.coef_[4],\n coef_names[5]: model.coef_[5],\n }\n results.append(result)\n return pd.DataFrame(results).set_index('task_id')", "def build_poly(tx, degree) :\n shape = tx.shape\n poly = np.zeros((shape[0], shape[1] * degree))\n poly[:,:shape[1]] = tx\n for deg in range(2, degree + 1) :\n for j in range(0, shape[1]) :\n poly[:, shape[1] * (deg - 1) + j] = tx[:,j] ** deg\n return poly", "def polynomial_basis(X, degree):\n n_samples, n_features = X.shape\n\n # The number of monomials is (n + d) choose d\n n_monomials = int(factorial(n_features + degree)/(factorial(n_features)*factorial(degree)))\n features = np.ones((n_monomials, n_samples))\n col = 1\n x_T = X.T\n\n for deg in range(1, degree + 1):\n for combs in combinations_with_replacement(x_T, deg):\n features[col, :] = reduce(lambda x, y: x * y, combs)\n col += 1\n return features.T", "def phi_poly(self,x,i):\n return x**i", "def definePolyFunction():\n lstWeights=[]\n degree = input(\"degree of polynomial in terms of highest exponent of x:\")\n degree = int(degree+1)\n for a in range (0,degree):\n string='weight for x^'+str(a)+':'\n weight = input(string)\n weight = float(weight)\n lstWeights.append(weight)\n return lstWeights" ]
[ "0.7538903", "0.7480027", "0.7236792", "0.7120117", "0.7109104", "0.7046839", "0.6833415", "0.6825309", "0.6752898", "0.6739122", "0.6643049", "0.6626195", "0.6606784", "0.65941375", "0.65254253", "0.65011287", "0.64472425", "0.64471453", "0.63934326", "0.63751334", "0.63060313", "0.6287972", "0.6272439", "0.62695813", "0.62354094", "0.6231072", "0.62251425", "0.61995375", "0.61994433", "0.619298" ]
0.7791223
0
Calls a function, with the function's params being the object's __dict__ values.
def call(self, func): args = tuple(self.__dict__.values()) try: return eval("func" + str(args)) except Exception, e: raise ValueError("Given Function is not valid for calling: %s" % e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _call_it(params): # pragma: no cover\n instance, name, args = params\n kwargs = {}\n return getattr(instance, name)(*args, **kwargs)", "def call_func(self, quantity, obj, args):\n\n try:\n result = getattr(obj, quantity)(*args)\n except KeyError:\n self.log.error(\"Unable to call the following function: %s\",\n quantity, exc_info=True, stack_info=True)\n raise\n return result", "def call(obj, /, *args, **kwargs):\n return obj(*args, **kwargs)", "def call_in_function(self, func_obj, parameters=None):\n if func_obj.name not in self.calls:\n self.calls[func_obj.name] = []\n if parameters is not None:\n p = dict(parameters)\n self.calls[func_obj.name].append([p.get(str(i), 0)\n for i in range(1, len(func_obj.declaration.parameters) + 1)])", "def __call__(self, *args, **kwargs):\n for key, obj in self._dict.items():\n key[0](obj, *args, **kwargs)", "def _call_func(quantity, obj, args):\n\n log = logging.getLogger(__name__)\n try:\n result = getattr(obj, quantity)(*args)\n except AttributeError:\n log.error(\"Object %s has no method: %s\", str(obj), quantity)\n raise\n except:\n log.error(\"Error while calling method %s of object %s\", quantity,\n str(obj))\n raise\n return result", "def apply(self, func):\r\n return func(**self.kwargs)", "def __call__(self, *args, **kwargs):\n return function(args, addtional_info)", "def callFuncBasedOnDict(func, argdict, **kwargs):\n if argdict is None:\n argdict = {}\n seldict = selectArgsFromDict(func, argdict)\n if kwargs is not None:\n seldict.update(kwargs)\n return func(**seldict)", "def __call__(obj):", "def call(self, *args, **kwargs):", "def __call__(self, *arg, **kwargs):\n return self._fun(*arg, **kwargs)", "def __call__(self, *args, **kwargs) -> Dict[str, Any]:\n callable_obj: List[Callable[[Any], Any]] = []\n result_funcs: Dict[str, Any] = {}\n\n for obj in self.__dict__.values():\n if callable(obj):\n callable_obj.append(obj)\n if callable_obj:\n for call_obj in callable_obj:\n result_funcs[call_obj.__name__] = call_obj(*args, **kwargs)\n return result_funcs", "def from_object(cls, obj):\n if any(p is obj for p in obj.params):\n raise ValueError(\n f\"Cannot create a Function from a parameter object. This parameter {obj._name!r} \"\n \"is like an argument to a function---not the body of the function itself.\"\n )\n\n named_args = {p._name: getattr(p, \"_proxytype\", type(p)) for p in obj.params}\n # ^ if any of the params are widgets (likely), use their base Proxytype in the Function type signature:\n # a Function[Checkbox, Slider, ...] 
would be 1) weird and 2) not serializeable.\n concrete_function_type = cls[named_args, type(obj)]\n\n graft = client.function_graft(obj, *(p.graft for p in obj.params))\n # TODO we should probably store `obj.params` somewhere---that's valuable metadata maybe\n # to show the function as widgets, etc?\n return concrete_function_type._from_graft(graft)", "def call(self, **kwargs):\n return getattr(self.resource, self.function)(**kwargs)", "def fn(*args, **kwargs):\n pass", "def __call__(self, *args, **kwargs):\n return self.func(*args, **kwargs)", "def __call__(self, *args, **kwargs):\n return self.func(*args, **kwargs)", "def func_call(self, t):\n func, params = t\n func_name = func.value\n func.value = \"({}({}))\".format(func_name, params)\n return func", "def __call__(self, *args, **kwargs):\n return self.f(*args, **kwargs)", "def __call__(self, *args, **kwargs):\n return self._func(*args, **kwargs)", "def fun_par_dict(fun: Callable, *args):\n if len(args) > 0:\n return fun(*args[:-1], **args[-1])\n else:\n return fun()", "def __call__(self, *args, **kw):\n return self.callable(*args, **kw)", "def __call__(self):\n # apply(self.func, self.args)\n self.func(*self.args)", "def call(self, method, name, params=None, payload=None, **kwds):", "def exec_params(call, *args, **kwargs):\n arg_spec = getattr(call, '_argspec', None)\n if arg_spec and not arg_spec.keywords:\n kwargs = {key: value for key, value in kwargs.iteritems()\n if key in arg_spec.args}\n return call(*args, **kwargs)", "def func(*args, **kwargs):\n return call(*args, **kwargs) # pylint: disable = E1102", "def run(self):\n self.fn(*self.args, **self.kwargs)", "def __call__(self, *args, **kwargs):\n return self.call(*args, **kwargs)", "def __call__(self, result_path=None, log_path=None, *args, **kwargs):\r\n return FunctionWithParams.__call__(self, result_path=result_path,\r\n log_path=log_path, *args, **kwargs)" ]
[ "0.6939864", "0.65609187", "0.64944154", "0.6451387", "0.6444234", "0.6434979", "0.63308376", "0.62893623", "0.61917114", "0.61473817", "0.6143386", "0.61355615", "0.6069545", "0.60603714", "0.60187966", "0.6007016", "0.59852356", "0.59852356", "0.5959359", "0.59573925", "0.5897727", "0.5861894", "0.57663035", "0.57335174", "0.5724398", "0.5720547", "0.570953", "0.56956595", "0.5691273", "0.5683867" ]
0.6599899
1
Inserts given 'value' inside self.__dict__, with its key being the inputted 'pos' value. This function is similar to the insert function belonging to lists.
def insert(self, pos, value): items = self.__dict__.values() if not isinstance(pos, int) or pos < 0: raise ValueError("'pos' value is not positive integer.") elif pos > len(items): raise ValueError("'pos' value is not a position in self.__dict__") items.insert(pos, value) new_dict = {} for x, y in enumerate(items): new_dict.update({x: y}) self.__dict__ = new_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _insert_item(self, key: _KT, value: _VT) -> None:\n dict.__setitem__(self, key, value)", "def insert(self, key, value):\n\t\tself.__insert(key, value, key[1:])", "def __setitem__(self, key, value):\n self.insert(key, value)", "def insert(self, value, pos):\r\n\r\n if self.head is None:\r\n self.head = Node(value)\r\n return\r\n\r\n if pos == 0:\r\n self.prepend(value)\r\n return\r\n\r\n index = 0\r\n node = self.head\r\n while node.next and index <= pos:\r\n if (pos - 1) == index:\r\n new_node = Node(value)\r\n new_node.next = node.next\r\n node.next = new_node\r\n return\r\n\r\n index += 1\r\n node = node.next\r\n else:\r\n self.append(value)", "def __setitem__(self,k,v):\n self.insert(k,v)", "def _insert(self, key, value):\n entry = self._lookup(key)\n if entry.value is None:\n self.used += 1\n if entry.key is not dummy:\n self.filled += 1\n entry.key = key\n entry.hash = self.first_hash(key)\n entry.value = value", "def insert(self, index, key, value):\r\n if key in self.keyOrder:\r\n n = self.keyOrder.index(key)\r\n del self.keyOrder[n]\r\n if n < index:\r\n index -= 1\r\n self.keyOrder.insert(index, key)\r\n super(OrderedDict, self).__setitem__(key, value)", "def insert(self, index, key, value):\n if key in self:\n # FIXME: efficiency?\n del self[key]\n self._sequence.insert(index, key)\n dict.__setitem__(self, key, value)", "def _map___setitem__(self, key, value):\n if not isinstance(key, self.keytype):\n raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if not isinstance(value, self.valuetype):\n raise KeyError('type of `value` should be ' + repr(self.valuetype) + ' but got ' + type(value))\n self.insert(key, value)\n return", "def insert(self,key, value):\n if key in self._position:\n # reset value for this node\n node_pos = self._position[key]\n node = self._heap[node_pos]\n node.value = value\n self._sink(node_pos)\n self._swim(node_pos)\n else:\n # insert a new node\n new_node = _Node(key,value)\n node_pos = len(self._heap)\n self._heap.append(new_node)\n self._position[key] = node_pos\n\n # repair priority\n self._swim(node_pos)", "def insert(self, key, value):\r\n self._data.append(self._Item(key, value))\r\n self._upheap(len(self._data) - 1) # upheap newly added position\r", "def insert(self, key, value):\n # Find the leaf node where to do the insertion.\n if not self.is_leaf():\n insert_point = self.get_position(key)\n return self.refs[insert_point].insert(key, value)\n\n # Located a leaf node, so insert the (key, value) pair.\n insert_point = self.get_position(key)\n self.keys.insert(insert_point, key)\n self.values.insert(insert_point, value)\n\n if self.is_full():\n self.split()\n\n return self", "def insert(self, key, value=None):\n if isinstance(key, list):\n for k in key:\n self.insert(k)\n else:\n if key == self.key:\n # update key: value\n self.value = value\n elif key < self.key:\n if self.left == None:\n self.left = Tree(key, value)\n else:\n self.left.insert(key, value)\n else:\n if self.right == None:\n self.right = Tree(key, value)\n else:\n self.right.insert(key, value)", "def insert(self, key, value):\n # Resize array here if necessary.\n if key < 0: key = 0\n elif key > len(self): key = len(self)\n if key < len(self):\n for j in range(len(self), key, -1):\n self._items[j] = self._items[j - 1]\n self._items[key] = value\n self._size += 1\n self.incModCount()", "def add(self, key, value, location):\r\n i = self.index_for_location(location)\r\n if i is not None:\r\n self.insert(i, key, value)\r\n else:\r\n 
self.__setitem__(key, value)", "def insert_after(self, key, value):\n self._insert_after(self.head, key, value)", "def insert(self, key, value):\n if key in self.map:\n return\n\n try:\n tag_key = TagKey(key)\n tag_val = TagValue(value)\n self.map[tag_key] = tag_val\n except ValueError:\n raise", "def insert(self, key, val):\n self.dict.setdefault(key, []).append(val)", "def insert(self, key, value):\n\n val = 0\n val = self.search(self.key)\n\n if self.key == key:\n self.val = value\n elif key < self.key:\n if self.left is None:\n self.left = self.__class__(key, value)\n else:\n self.left.insert(key, value)\n else:\n if self.right is None:\n self.right = self.__class__(key, value)\n else:\n self.right = self.right.insert(key, value)\n\n return self", "def insert(self, key: K, value: V) -> None:\n if key in self.__key_map__:\n self.remove(key)\n\n entry = (value, next(self.counter), key)\n self.__key_map__[key] = entry\n\n heapq.heappush(self.queue, entry)", "def insert(self, key, value):\n hash_key = hash(key) % self.length\n bucket = self.array[hash_key]\n for idx, key_val_pair in enumerate(bucket):\n k, v = key_val_pair\n if k == key:\n bucket[idx] = [key, value]\n return\n bucket.append([key, value])", "def __setitem__(key, value):", "def insert(self, key, value):\n\n if self.key == key:\n self.val = value\n elif key < self.key:\n if self.left is None:\n self.left = self.__class__(key, value)\n else:\n self.left = self.left.insert(key, value)\n else:\n if self.right is None:\n self.right = self.__class__(key, value)\n else:\n self.right = self.right.insert(key, value)\n\n return self", "def __setitem__(self, key, value):\n ndx = self._findPosition(key)\n if ndx:\n self._entryList[ndx].value = value\n return False\n else:\n entry = _MapEntry(key, value)\n self._entryList.append(entry)\n return True", "def __setitem__(self, key, value):\n if not key in self.ordered_list:\n self.ordered_list.append(key)\n self.__dict__[key] = value", "def __setitem__(self, key, value):\n index=self._index(key)\n if index==-1:\n self._item.append(Item(key,value))\n self._size+=1\n else:\n self._item[index].value=value", "def __setitem__(self, key, value):", "def insert(self, key: str, value: object) -> None:\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def insert(self, key: str, value: object) -> None:\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1", "def __setitem__(self, pos, val):\n self._coords[pos] = val" ]
[ "0.72585404", "0.69225717", "0.69138044", "0.6725754", "0.6710904", "0.66295755", "0.6614174", "0.659485", "0.65802276", "0.65781033", "0.6519577", "0.6513265", "0.6459961", "0.64219296", "0.6367862", "0.6348602", "0.6346603", "0.6286433", "0.62864274", "0.6267077", "0.6241228", "0.6205147", "0.62015647", "0.61985534", "0.61777264", "0.6120801", "0.60995674", "0.60631096", "0.60631096", "0.6047492" ]
0.8579885
0
split_file for the 'src' project in the trunk.
def ChromeTreeFileSplitter(path): # Exclude .DEPS.git from triggering builds on chrome. if path == 'src/.DEPS.git': return None # List of projects we are interested in. The project names must exactly # match paths in the Subversion repository, relative to the 'path' URL # argument. build_utils.SplitPath() will use them as branch names to # kick off the Schedulers for different projects. projects = ['src'] return build_utils.SplitPath(projects, path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_src_folder(self, src_folder: str) -> None:", "def source_data_files(self, data_dir, tmp_dir, dataset_split):\n raise NotImplementedError()", "def split(self, start, nsamps, filename=None, gulp=1024,\n back_compatible=True, **kwargs):\n if filename is None:\n filename = f\"{self.header.basename}_{start:d}_{start+nsamps:d}.fil\"\n new_tstart = self.header.tstart + ((self.header.tsamp * start) / 86400.0)\n out_file = self.header.prepOutfile(\n filename, updates={'tstart': new_tstart}, nbits=self.header.nbits\n )\n for _count, _ii, data in self.readPlan(\n gulp, start=start, nsamps=nsamps, **kwargs,\n ):\n out_file.cwrite(data)\n out_file.close()\n return out_file.name", "def split(self):\n\n # FIXME: user should be able to change the default behavior of\n # this function (for instance user may require one filter not\n # to split the content of the input file and the same input \n # to be used by the next filter.\n \n utils.split_file(self.files['hit_ids'],\n self.files['input'],\n self.files['filtered_reads'],\n self.files['survived_reads'])", "def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n 
chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()", "def _split_lint_files(self):\n lint_files_list = copy.deepcopy(self._facts[\"lint_files\"])\n for lint_file in lint_files_list:\n if lint_file.name.startswith(\"test_\") or lint_file.name.endswith(\n \"_test.py\"\n ):\n self._facts[\"lint_unittest_files\"].append(lint_file)\n self._facts[\"lint_files\"].remove(lint_file)", "def main(opts):\n\n # split the file\n split_file(opts['in_file'], opts['num_splits'], opts['split_dir'], opts['mut_file'])", "def split_start(infiles, outfiles):\n\n # split always runs exactly one job (unlike @subdivide)\n # So it implicitly combines all its inputs before running and generating multiple output\n # @originate generates multiple output so the input for @split is a list...\n infile = infiles[0]\n\n # clean up previous\n for f in outfiles:\n os.unlink(f)\n\n\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n #\n # Create more files than the previous invocation\n #\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n n_to_produce = len(outfiles) + 1\n for i in range(n_to_produce):\n f = '{}{}.split'.format(tempdir, i)\n open(f, 'a').close()", "def split_file(self, input_file):\r\n file_list = [] \r\n with open(input_file, 'r', encoding='GB18030', errors='ignore') as f_in:\r\n data = f_in.readlines()\r\n lines_num = len(data)\r\n size = lines_num // self.num_workers # lines splitted in a chunk\r\n start = 0\r\n end = size\r\n w_path = \"../data/\"\r\n for i in range(lines_num//size):\r\n chunk_name = \"chunk_\" + str(i) + \".dat\"\r\n with open(w_path + chunk_name, 'w', encoding='utf-8') as f_out:\r\n f_out.write(''.join(data[start:end]))\r\n start = start + size\r\n end = end + size\r\n file_list.append(\"../data/chunk_\" + str(i) + \".dat\")\r\n \r\n print(f\"File splitted into {self.num_workers} chunks.\")\r\n return file_list, size", "def getBaseSrcFile(self) -> List[int]:\n ...", "def main():\n parser = argparse.ArgumentParser(\n description=\"Script that splits a list of files of vtb trees into train/dev/test sets\",\n )\n parser.add_argument(\n 'org_dir',\n help='The location of the original directory storing correctly formatted vtb trees '\n )\n parser.add_argument(\n 'split_dir',\n help='The location of new directory storing the train/dev/test set'\n )\n\n args = parser.parse_args()\n\n org_dir = args.org_dir\n split_dir = args.split_dir\n\n random.seed(1234)\n\n split_files(org_dir, split_dir)", "def source_files(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___DebuggedSourceFile]:", "def SplitPackage(filename):\n full_path = os.path.realpath(filename)\n parent, child = os.path.split(full_path)\n while parent != '/' and os.path.exists(os.path.join(parent, '__init__.py')):\n child = os.path.join(os.path.basename(parent), child)\n parent = os.path.dirname(parent)\n return parent, child", "def lemon_bidscoin_prepare(src_path):\n lemon_prepare()\n this_dir = os.path.dirname(__file__)\n data_dir = os.path.join(this_dir,'..','_data')\n root_path = 
os.path.abspath(os.path.join(data_dir,'lemon'))\n bidscoin_input_path = src_path\n\n os.makedirs(bidscoin_input_path,exist_ok=True)\n\n files = _get_files(root_path)\n files = [x for x in files if x.split('.')[-1] in ['eeg','vmrk','vhdr'] ]\n\n files_out = []\n for f in files:\n session = 'ses-001'\n task = 'resting'\n head,tail=os.path.split(f)\n sub = tail.split('.')[0]\n new_path = os.path.join(bidscoin_input_path,sub,session,task,tail)\n files_out.append(new_path)\n\n for old,new in zip(files,files_out):\n print(old,' to ',new)\n os.makedirs(os.path.split(new)[0], exist_ok=True)\n if not os.path.isfile(new):\n shutil.copy2(old,new)\n else:\n print('already done, skipping...')\n print('finish')", "def get_test_files():\n repo_fs()\n return TEST_FILES", "def read_tsv(self,f=None):\n if f:\n self.pfile = f\n\n n = 0\n cols = {}\n with open(self.pfile, 'rb') as csvfile:\n freader = csv.reader(csvfile, delimiter=\"\\t\", quotechar='|')\n for row in freader:\n if len(row) == 0:\n continue\n\n n += 1\n if n == 1:\n cols = row\n log.debug(\"header\".format(cols) )\n continue\n\n # 0)\n name = row[0]\n\n # 1) code\n #self.projects[ name]['code'] = row[1]\n\n # 2)\n if row[2]:\n rp = row[2]\n else:\n rp = row[1]\n\n if self.repo_root:\n repo = os.path.join( self.repo_root, rp)\n else:\n repo = None\n\n gp = GProject(name, row[1], wiki=self.wiki_url, repo=repo)\n\n if row[1]:\n gp.code = row[1]\n\n # 3) build dir\n if len(row) >= 4 and row[3]:\n build_dir = row[3]\n else:\n # default to code\n build_dir = row[1]\n\n # 4) prefix\n if len(row) >= 5 and row[4]:\n gp.prefix = row[4]\n\n if n >= 3:\n #break\n pass\n # 7) rel\n if len(row) >= 8 and row[7]:\n gp.rel_path = row[7]\n\n gp.set_primary_repo()\n\n gp.set_release_file();\n gp.read_release();\n if gp.release_file:\n\n pass\n\n if len(row) >= 6 and row[5]:\n gp.set_wiki_pages( row[5] )\n\n self.projects[name] = gp\n\n # remaining items in row\n # results = [gp.add_repo(os.path.join( self.repo_root, row[k] ) for k in range(8, len(row) if row[k]) ]\n for k in range(8,len(row)):\n #print(\"related:\", row[k])\n if not row[k]:\n continue\n repo = os.path.join( self.repo_root, row[k])\n gp.add_repo(repo )", "def splitFile(f, rootdir=\"/tmp\", splitCmd=\"/usr/bin/split\", chunkSize=\"100m\"):\n d = str(uuid.uuid4())\n path = os.path.join(rootdir, d)\n # I want it to fail hard here\n os.makedirs(path)\n prefix = os.path.join(path, \"chunk-\")\n subprocess.check_call([splitCmd, \"-b\", chunkSize, \"-d\", \"-a\", \"5\", f, prefix])\n chunks = glob.glob(os.path.join(path, \"chunk-*\"))\n chunks.sort()\n return chunks", "def collect_project_source_files():\n source_files = glob.glob(PROJECT_SOURCE_FILES_FOLDER + '/**/*.py', recursive=True)\n # Insert root main.py at the beginning.\n source_files.insert(0, os.path.join(PROJECT_ROOT_FOLDER, 'main.py'))\n return list(map(lambda path: posixpath.join(*path.split('\\\\')), source_files))", "def testA_FileSplitting(self):\n splitter = SplitterFactory()\n\n oneSetSubscription = self.createSubscription(nFiles=10, lumisPerFile=1)\n jobFactory = splitter(package=\"WMCore.WMBS\", subscription=oneSetSubscription)\n\n jobGroups = jobFactory(lumis_per_job=3, halt_job_on_file_boundaries=True, performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 1)\n self.assertEqual(len(jobGroups[0].jobs), 10)\n for job in jobGroups[0].jobs:\n self.assertTrue(len(job['input_files']), 1)\n self.assertEqual(job['estimatedJobTime'], 100 * 12)\n self.assertEqual(job['estimatedDiskUsage'], 100 * 400)\n 
self.assertEqual(job['estimatedMemoryUsage'], 2300)\n\n twoLumiFiles = self.createSubscription(nFiles=5, lumisPerFile=2)\n jobFactory = splitter(package=\"WMCore.WMBS\", subscription=twoLumiFiles)\n jobGroups = jobFactory(lumis_per_job=1, halt_job_on_file_boundaries=True, performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 1)\n self.assertEqual(len(jobGroups[0].jobs), 10)\n for job in jobGroups[0].jobs:\n self.assertEqual(len(job['input_files']), 1)\n self.assertEqual(job['estimatedJobTime'], 50 * 12)\n self.assertEqual(job['estimatedDiskUsage'], 50 * 400)\n self.assertEqual(job['estimatedMemoryUsage'], 2300)\n\n wholeLumiFiles = self.createSubscription(nFiles=5, lumisPerFile=3)\n jobFactory = splitter(package=\"WMCore.WMBS\", subscription=wholeLumiFiles)\n jobGroups = jobFactory(lumis_per_job=2, halt_job_on_file_boundaries=True, performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 1)\n # 10 because we split on run boundaries\n self.assertEqual(len(jobGroups[0].jobs), 10)\n jobList = jobGroups[0].jobs\n for idx, job in enumerate(jobList, start=1):\n # Have should have one file, half two\n self.assertEqual(len(job['input_files']), 1)\n if idx % 2 == 0:\n self.assertEqual(job['estimatedJobTime'], (1.0 * round(100 / 3)) * 12)\n self.assertEqual(job['estimatedDiskUsage'], (1.0 * round(100 / 3)) * 400)\n else:\n self.assertEqual(job['estimatedJobTime'], (2.0 * round(100 / 3)) * 12)\n self.assertEqual(job['estimatedDiskUsage'], (2.0 * round(100 / 3)) * 400)\n self.assertEqual(job['estimatedMemoryUsage'], 2300)\n\n mask0 = jobList[0]['mask'].getRunAndLumis()\n self.assertEqual(mask0, {0: [[0, 1]]})\n mask1 = jobList[1]['mask'].getRunAndLumis()\n self.assertEqual(mask1, {0: [[2, 2]]})\n mask2 = jobList[2]['mask'].getRunAndLumis()\n self.assertEqual(mask2, {1: [[100, 101]]})\n mask3 = jobList[3]['mask'].getRunAndLumis()\n self.assertEqual(mask3, {1: [[102, 102]]})\n\n j0 = Job(id=jobList[0]['id'])\n j0.loadData()\n self.assertEqual(j0['mask'].getRunAndLumis(), {0: [[0, 1]]})\n\n # Do it with multiple sites\n twoSiteSubscription = self.createSubscription(nFiles=5, lumisPerFile=2, twoSites=True)\n jobFactory = splitter(package=\"WMCore.WMBS\",\n subscription=twoSiteSubscription)\n jobGroups = jobFactory(lumis_per_job=1,\n halt_job_on_file_boundaries=True,\n performance=self.performanceParams)\n self.assertEqual(len(jobGroups), 2)\n self.assertEqual(len(jobGroups[0].jobs), 10)\n for job in jobGroups[0].jobs:\n self.assertEqual(len(job['input_files']), 1)\n self.assertEqual(job['estimatedJobTime'], 50 * 12)\n self.assertEqual(job['estimatedDiskUsage'], 50 * 400)\n self.assertEqual(job['estimatedMemoryUsage'], 2300)", "def split_file(filename, split_num):\n root, ext = os.path.splitext(filename)\n with open(filename) as f:\n lines = f.readlines()\n total_line = len(lines)\n\n print lines[0].split('\\t')\n\n size = total_line / split_num\n\n print 'Total line: %d, splited file line number: %d' % (total_line, size)\n\n total_line - size * split_num\n for i in range(0, split_num):\n split_file = root + '_' + str(i+1) + ext\n\n start = i * size;\n end = (i+1) * size;\n if i == split_num - 1:\n end = total_line\n\n print 'splite file %s: line from %d to %d' % (split_file, start, end)\n\n with open(split_file, 'w') as fw:\n for j in range(start, end):\n fw.write('%s' % lines[j])", "def create_initial_file():\n\n merge_file = tempfile.NamedTemporaryFile()\n\n # spin the sources for the base file\n for source in sort_sources(\n recursive_glob(settings[\"datapath\"], 
settings[\"hostfilename\"])\n ):\n\n start = \"# Start {}\\n\\n\".format(os.path.basename(os.path.dirname(source)))\n end = \"\\n# End {}\\n\\n\".format(os.path.basename(os.path.dirname(source)))\n\n with open(source, \"r\", encoding=\"UTF-8\") as curFile:\n write_data(merge_file, start + curFile.read() + end)\n\n # spin the sources for extensions to the base file\n for source in settings[\"extensions\"]:\n for filename in sort_sources(\n recursive_glob(\n path_join_robust(settings[\"extensionspath\"], source),\n settings[\"hostfilename\"],\n )\n ):\n with open(filename, \"r\") as curFile:\n write_data(merge_file, curFile.read())\n\n maybe_copy_example_file(settings[\"blacklistfile\"])\n\n if os.path.isfile(settings[\"blacklistfile\"]):\n with open(settings[\"blacklistfile\"], \"r\") as curFile:\n write_data(merge_file, curFile.read())\n\n return merge_file", "def split_file(in_file, num_splits, split_dir, mut_file):\n\n # create the output directory if it does\n # not exist\n if not os.path.exists(split_dir):\n os.mkdir(split_dir)\n\n # open the info file\n f = open(in_file)\n pdb_header = f.readline()\n\n # open the mutation file\n m = open(mut_file)\n mut_header = m.readline()\n\n # read into a dictionary containing\n # structure ids as keys and lines pertaining\n # to it as values\n pdb_dict = read_file(f)\n mut_dict = read_file(m)\n\n # determine total num of ids in file\n total_ids = len(list(pdb_dict.keys()))\n print(total_ids)\n # determine num of ids to put in each split\n num_ids = int(total_ids/num_splits)\n\n # counters\n count_file = 0\n count_id = num_ids\n\n # randomize order of insertions\n keys = list(pdb_dict.keys())\n random.shuffle(keys)\n\n # iterate through dict and write to files\n #for key in sorted(pdb_dict):\n for key in keys:\n\n # check if we need a new file\n if (count_id == num_ids and count_file < num_splits):\n count_id = 0\n pdb_out = open(split_dir + \"/pdb_info_split_\" + str(count_file) + \".txt\", 'w')\n pdb_out.write(pdb_header)\n mut_out = open(split_dir + \"/mut_info_split_\" + str(count_file) + \".txt\", 'w')\n mut_out.write(mut_header)\n count_file += 1\n\n # write all lines pertaining to the structure id\n for line in pdb_dict[key]:\n pdb_out.write(line)\n if key in mut_dict:\n for line in mut_dict[key]:\n mut_out.write(line)\n\n count_id += 1", "def source_test_file_name():\n return 'feature'", "def make_release_tree(self, base_dir, files):\n files.append(\"Cargo.toml\")\n files += [str(f) for f in pathlib.Path(\"src\").glob(\"**/*.rs\") if f.is_file()]\n super().make_release_tree(base_dir, files)", "def project_root_files():\n return [\"parent_workflow.wdl\"]", "def get_source_files(self):\n return [\n path.as_posix()\n for path in _Path(self.src_dir).rglob(\"*\")\n if not path.is_dir()\n ] + [\n (path / \"CMakeLists.txt\").as_posix()\n for path in _PurePath(self.src_dir).parents\n ]", "def test_compile_local_files(self, tester_login):\n filenames = os.listdir(COMPILE_TESTER_DIR)\n test_files = [os.path.join(COMPILE_TESTER_DIR, name) for name in filenames]\n projects = [self.upload_project('#uploadFolderZip form', fname,\n os.path.splitext(os.path.basename(fname))[0]) for fname\n in test_files]\n flag = True\n while flag:\n uploaded_sketches = self.get_elements(By.CSS_SELECTOR, '#project_list > li')\n if len(uploaded_sketches) >= len(projects):\n flag = False\n break\n time.sleep(1)\n self.compile_all_sketches(COMPILE_TESTER_STAGING_URL,\n '#user_projects tbody a',\n iframe=False,\n compile_type='sketch',\n create_report=True, 
logfile=COMPILE_TESTER_LOGFILE_STAGING)\n for name in projects:\n self.delete_project(name.replace(\" \", \"-\"))", "def init_src(config):\n new_py = new_hark = None\n\n os.makedirs(str(config.project.python_src), exist_ok=True)\n\n py_init = config.project.python_src / \"__init__.py\"\n if not py_init.exists():\n with open(py_init, \"w\") as f:\n f.write(\"\")\n new_py = py_init\n\n if not config.project.hark_file.exists():\n with open(config.project.hark_file, \"w\") as f:\n main = 'fn main() {\\n print(\"Hello World!\");\\n}\\n'\n f.write(f\"// Something great begins here.\\n\\n\\n{main}\")\n new_hark = config.project.hark_file\n\n return new_py, new_hark", "def testWriteSourceFiles(self):\n source_files = ['test.c']\n\n file_writer = writers.VS2008ProjectFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer._WriteSourceFiles(source_files)\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = (\n b'\\t\\t<Filter\\r\\n'\n b'\\t\\t\\tName=\"Source Files\"\\r\\n'\n b'\\t\\t\\tFilter=\"cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx\"\\r\\n'\n b'\\t\\t\\tUniqueIdentifier=\"{4FC737F1-C7A5-4376-A066-2A32D752A2FF}\"\\r\\n'\n b'\\t\\t\\t>\\r\\n'\n b'\\t\\t\\t<File\\r\\n'\n b'\\t\\t\\t\\tRelativePath=\"test.c\"\\r\\n'\n b'\\t\\t\\t\\t>\\r\\n'\n b'\\t\\t\\t</File>\\r\\n'\n b'\\t\\t</Filter>\\r\\n')\n self.assertEqual(output_data, expected_output_data)", "def combine(self):\n\n import re\n \n print 'Creating file', self.__filename\n \n bname = (os.path.split(self.__filename))[1]\n bname2 = bname\n \n # bugfix: if file contains characters like +,.,[]\n # properly escape them, otherwise re will fail to match.\n for a, b in zip(['+', '.', '[', ']','$', '(', ')'],\n ['\\+','\\.','\\[','\\]','\\$', '\\(', '\\)']):\n bname2 = bname2.replace(a, b)\n \n chunkre = re.compile(bname2 + '-' + '[0-9]+')\n \n chunkfiles = []\n for f in os.listdir(\".\"):\n print f\n if chunkre.match(f):\n chunkfiles.append(f)\n\n\n print 'Number of chunks', len(chunkfiles), '\\n'\n chunkfiles.sort(self.sort_index)\n\n data=''\n for f in chunkfiles:\n\n try:\n print 'Appending chunk', os.path.join(\".\", f)\n data += open(f, 'rb').read()\n except (OSError, IOError, EOFError), e:\n print e\n continue\n\n try:\n f = open(bname, 'wb')\n f.write(data)\n f.close()\n except (OSError, IOError, EOFError), e:\n raise FileSplitterException, str(e)\n\n print 'Wrote file', bname" ]
[ "0.5839572", "0.5629381", "0.545824", "0.54164666", "0.5395968", "0.53366303", "0.5248026", "0.521077", "0.520225", "0.52008224", "0.5186133", "0.5172932", "0.5105603", "0.5100697", "0.50301254", "0.5015693", "0.50072145", "0.5003615", "0.49936536", "0.49641842", "0.49630165", "0.49590594", "0.4942837", "0.49017885", "0.48973635", "0.4892944", "0.48718998", "0.48577693", "0.48537844", "0.48451802" ]
0.61515033
0
convert RGB image to YIQ image using transform matrix
def rgb2yiq(imRGB): trans = np.array([[0.299, 0.587, 0.114], [0.596, -0.275, -0.321], [0.212, -0.523, 0.311]]) return np.dot(imRGB, trans)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rgb2yiq(imRGB):\n return np.dot(imRGB, TRANSFORM.T.copy())", "def transformRGB2YIQ(imgRGB: np.ndarray) -> np.ndarray:\r\n YIQ_from_RGB = np.array([[0.299, 0.587, 0.114],\r\n [0.59590059, -0.27455667, -0.32134392],\r\n [0.21153661, -0.52273617, 0.31119955]])\r\n YIQImg = np.ndarray(imgRGB.shape)\r\n\r\n YIQImg[:, :, 0] = YIQ_from_RGB[0,0] * imgRGB[:, :, 0] + YIQ_from_RGB[0,1] * imgRGB[:, :, 1] + YIQ_from_RGB[0,2] * imgRGB[:, :, 2]\r\n YIQImg[:, :, 1] = YIQ_from_RGB[1,0] * imgRGB[:, :, 0] + YIQ_from_RGB[1,1] * imgRGB[:, :, 1] + YIQ_from_RGB[1,2] * imgRGB[:, :, 2]\r\n YIQImg[:, :, 2] = YIQ_from_RGB[2,0] * imgRGB[:, :, 0] + YIQ_from_RGB[2,1] * imgRGB[:, :, 1] + YIQ_from_RGB[2,2] * imgRGB[:, :, 2]\r\n\r\n return YIQImg", "def yiq2rgb(imYIQ):\n return np.dot(imYIQ, np.linalg.inv(TRANSFORM).T.copy())", "def rgb2yiq(im_rgb):\n return multiply_by_left_matrix(YIQ_MATRIX, im_rgb)", "def rgb2yiq(imRGB):\n return np.dot(imRGB, np.array(MATRIX).T)", "def transformYIQ2RGB(imgYIQ: np.ndarray) -> np.ndarray:\r\n yiq_from_rgb = np.array([[0.299, 0.587, 0.114],\r\n [0.59590059, -0.27455667, -0.32134392],\r\n [0.21153661, -0.52273617, 0.31119955]])\r\n rgb_from_yiq = np.linalg.inv(yiq_from_rgb)\r\n\r\n RGBImg = np.ndarray(imgYIQ.shape)\r\n\r\n RGBImg[:, :, 0] = rgb_from_yiq[0,0] * imgYIQ[:, :, 0] + rgb_from_yiq[0,1] * imgYIQ[:, :, 1] + rgb_from_yiq[0,2] * imgYIQ[:, :, 2]\r\n RGBImg[:, :, 1] = rgb_from_yiq[1,0] * imgYIQ[:, :, 0] + rgb_from_yiq[1,1] * imgYIQ[:, :, 1] + rgb_from_yiq[1,2] * imgYIQ[:, :, 2]\r\n RGBImg[:, :, 2] = rgb_from_yiq[2,0] * imgYIQ[:, :, 0] + rgb_from_yiq[2,1] * imgYIQ[:, :, 1] + rgb_from_yiq[2,2] * imgYIQ[:, :, 2]\r\n\r\n return RGBImg", "def yiq2rgb(imYIQ):\n return np.dot(imYIQ, np.linalg.inv(np.array(MATRIX).T))", "def yiq2rgb(im_yiq):\n return multiply_by_left_matrix(np.linalg.inv(YIQ_MATRIX), im_yiq)", "def rgb2yiq(imRGB):\n return __image_color_conversion(imRGB, RGB_TO_YIQ_MATRIX)", "def _rgb2y(self, im):\n if len(im.shape) < 3:\n return im\n return np.sum(im * [0.299, 0.587, 0.114], axis=2)", "def yiq2rgb(imYIQ):\n trans = np.array([[1, 0.956, 0.62], [1, -0.272, -0.647], [1, -1.108, 1.705]])\n return np.dot(imYIQ, trans)", "def _convert_to_yolo_img(self, img):\n\n img = img / 255.0\n h, w, c = img.shape\n img = img.transpose(2, 0, 1)\n outimg = make_image(w, h, c)\n img = img.reshape((w*h*c))\n data = c_array(c_float, img)\n outimg.data = data\n rgbgr_image(outimg)\n return outimg", "def transform(self, previousimage):", "def transform_image(self):\n im = cv2.imread(\"result.png\", 0)\n im2 = cv2.resize(im, (28, 28))\n im = im2.reshape(28, 28, -1)\n im = im.reshape(1, 1, 28, 28)\n im = cv2.bitwise_not(im)\n im = im.reshape(28,28)\n \n with out:\n clear_output()\n \n # resize\n img = np.array(im)\n img = img.reshape(28*28,)\n \n #img = img/255.0\n \n return img", "def preprocess(image):\n image = rgb2yuv(image)\n return image", "def image_transform(im_bytes):\n img = [mx.image.imdecode(bytes.fromhex(im.lstrip('0x'))) for im in im_bytes]\n out = gcv.data.transforms.presets.yolo.transform_test(img)\n return out[0]", "def yiq2rgb(imYIQ):\n return __image_color_conversion(imYIQ, YIQ_TO_RGB_MATRIX)", "def yuv2rgb(im):\n ## conflicting definitions exist depending on whether you use the full range\n ## of YCbCr or clamp out to the valid range. 
see here\n ## http://www.equasys.de/colorconversion.html\n ## http://www.fourcc.org/fccyvrgb.php\n from numpy import dot, ndarray, array\n # if not im.dtype == 'uint8':\n # raise ImageUtilsError('yuv2rgb only implemented for uint8 arrays')\n\n ## better clip input to the valid range just to be on the safe side\n yuv = ndarray(im.shape) ## float64\n yuv[:, :, 0] = im[:, :, 0].clip(16, 235).astype(yuv.dtype) - 16\n yuv[:, :, 1:] = im[:, :, 1:].clip(16, 240).astype(yuv.dtype) - 128\n\n ## ITU-R BT.601 version (SDTV)\n A = array([[1., 0., 0.701],\n [1., -0.886 * 0.114 / 0.587, -0.701 * 0.299 / 0.587],\n [1., 0.886, 0.]])\n A[:, 0] *= 255. / 219.\n A[:, 1:] *= 255. / 112.\n\n ## ITU-R BT.709 version (HDTV)\n # A = array([[1.164, 0., 1.793],\n # [1.164, -0.213, -0.533],\n # [1.164, 2.112, 0.]])\n\n rgb = dot(yuv, A.T)\n return rgb.clip(0, 255).astype('uint8')", "def rgb_to_ycbcr(image: torch.Tensor) -> torch.Tensor:\n r: torch.Tensor = image[..., 0, :, :]\n g: torch.Tensor = image[..., 1, :, :]\n b: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n y: torch.Tensor = 0.299 * r + 0.587 * g + 0.114 * b\n cb: torch.Tensor = (b - y) * 0.564 + delta\n cr: torch.Tensor = (r - y) * 0.713 + delta\n return torch.stack([y, cb, cr], -3)", "def rgb2yuv(image):\n return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)", "def transform_images(img1,img2):", "def rgb_to_ycbcr(image: np.ndarray) -> np.ndarray:\n\n \"\"\" from RGB (0-1).\n \"\"\"\n\n if not is_rgb(image):\n raise ValueError(\"Input needs to be an array of RGB values\")\n\n m = np.array(\n [\n [+065.481, +128.553, +024.966],\n [-037.797, -074.203, +112.000],\n [+112.000, -093.786, -018.214],\n ]\n )\n a = np.array([16, 128, 128])\n\n return np.dot(image, m.T) + a", "def apply_transform_matrix(self, img: np.ndarray, transform_matrix):\n h, w = img.shape[0], img.shape[1]\n transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)\n img = np.rollaxis(img, 2, 0)\n final_affine_matrix = transform_matrix[:2, :2]\n final_offset = transform_matrix[:2, 2]\n\n channel_images = [scipy.ndimage.interpolation.affine_transform(\n x_channel,\n final_affine_matrix,\n final_offset,\n order=1,\n mode=self.fill_mode,\n cval=self.cval) for x_channel in img]\n img = np.stack(channel_images, axis=0)\n img = np.rollaxis(img, 0, 2 + 1)\n # img = apply_affine_transform(img, transform_matrix, channel_axis=2, fill_mode=self.fill_mode, cval=self.cval) # apply_transform\n return img", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))", "def image_transform(im, format='pytorch'):\n if format == 'pytorch':\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n im = im.astype(np.float32)\n im = im / 255.\n im = (im - np.array([0.485, 0.456, 0.406], dtype=np.float32) ) / \\\n np.array([0.229, 0.224, 0.225], dtype=np.float32)\n elif format == 'caffe':\n # return BGR image\n im = im.astype(np.float32)\n im = im - np.array([103.939, 116.779, 123.68], dtype=np.float32)\n return im", "def rgb_to_ycbcr(img):\n\n T = np.array([\n [0.256788235294118, -0.148223529411765, 0.439215686274510],\n [0.504129411764706, -0.290992156862745, -0.367788235294118],\n [0.097905882352941, 0.439215686274510, -0.071427450980392],\n ], 
dtype=np.float64)\n\n O = np.array([16, 128, 128], dtype=np.float64)\n\n img = img.astype(np.float64)\n res = np.matmul(img, T) + O\n res = res.clip(0, 255).round().astype(np.uint8)\n\n return res", "def MakeCarToImageTransform(pixels_per_meter, image_ref_x, image_ref_y,\n flip_axes):\n ppm1 = 0. if flip_axes else pixels_per_meter\n ppm2 = -pixels_per_meter if flip_axes else 0.\n # pyformat: disable\n car_to_image_transform = np.array([\n [ppm1, ppm2, 0., image_ref_x],\n [ppm2, ppm1, 0., image_ref_y],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.]])\n # pyformat: enable\n return car_to_image_transform", "def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img" ]
[ "0.7917481", "0.74076706", "0.73373646", "0.73334646", "0.73267543", "0.7238105", "0.7093386", "0.7060213", "0.6968569", "0.6862498", "0.6690507", "0.6534706", "0.6488643", "0.6380209", "0.6290675", "0.62476933", "0.6240027", "0.6191612", "0.61170465", "0.61043394", "0.60982007", "0.6051396", "0.60096234", "0.5976754", "0.5976754", "0.5970135", "0.59599453", "0.5926539", "0.59140635", "0.59128076" ]
0.7516433
1
performs the quantization loop until the process converges, or for at most n_iter rounds
def perform_quantization_loop(z, q, n_iter, hist, bins): error = [] current_error = 0 for i in range(n_iter): sigma = np.zeros(len(q)) for j in range(len(q)): p_z = hist[z[j]:z[j+1]+1] Z = bins[z[j]:z[j+1]+1] divided = sum(p_z*Z) divisor = sum(p_z) q[j] = divided / divisor Q = np.array([q[j]]*Z.size) sigma[j] = sum( (Q - Z) * (Q - Z) * p_z) if current_error == sum(sigma): # process converged break else: current_error = sum(sigma) error.append(current_error) for j in range(1, len(z) - 1): z[j] = (q[j-1] + q[j]) / 2 return z, q, error
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n if not self._no_progress and self._verbose:\n from progressbar import ProgressBar\n progress = ProgressBar()\n iter_range = progress(range(self._iters))\n else:\n iter_range = range(self._iters)\n\n if self._no_progress and self._time_iters:\n from time import time\n\n i = 0\n try:\n for i in iter_range:\n if self._verbose and self._no_progress:\n print(\"Iteration \" + repr(i))\n\n if self._no_progress and self._time_iters:\n start = time()\n\n self.iteration += 1\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n if self._double:\n update_m_double(self._m, alpha_k, self._p_k)\n sub_scaled_vector_double(self._residual_k,\n self._residual_k,\n alpha_k, self._v_k)\n else:\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k,\n alpha_k, self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n print(\"Converged.\")\n self.converged = True\n break\n\n if self._double:\n add_scaled_vector_double(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n else:\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k, self._p_k)\n\n self._rho_k = rho_k_plus_1\n\n if self._noisy:\n print(\" Residual=\" + str(rho_k_t))\n\n if self._no_progress and self._time_iters:\n print(\"Elapsed time for iteration \" + str(i) + \": \" +\n str(time() - start) + \" seconds\")\n\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, i, self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, i)\n except KeyboardInterrupt:\n print(\"Reconstruction aborted (CTRL-C) at iteration \" + str(i))\n finally:\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, \"result\", self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, \"result\")\n self.iteration = i+1\n return (self._m.get().reshape(self._data.nX1, self._data.nX2),\n self.iteration)", "def mbieLoop (self) :\n self.iterCnt = 0\n while self.iterCnt < 5000:\n s = self.mdp.s0\n for h in range(self.H) :\n self.QUpper = QBoundsSolver(self.mdp, self.PHat, self.QUpper, self.Ntotal, 0.1, True, self.stop)\n a = np.argmax(self.QUpper[s])\n s_, self.R[s,a] = self.mdp.step(s, a)\n self.updateVisitStatistics(s, a, s_)\n s = s_\n\n if self.iterCnt % 10 == 0: \n print(self.iterCnt)\n print(self.QUpper)\n\n self.iterCnt += 1", "def quantize(im_orig, n_quant, n_iter):\n rgb, y, im_yiq = check_rgb(im_orig)\n hist_orig, bin_edges = np.histogram(y*(BITS - 1), BITS, (0, BITS - 1))\n z = np.concatenate([[0], initial_z(hist_orig, n_quant), [(BITS-1)]])\n q = find_q(z, hist_orig)\n error = [calc_err(q, z, hist_orig)]\n for i in range(n_iter - 1): # first iteration was already done\n z_new = find_z(q)\n if np.array_equal(z_new, z): # stop if the z and q vectors are already optimal\n break\n q = find_q(z_new, hist_orig)\n z = z_new\n error.append(calc_err(q, z, hist_orig))\n lut = im_lut(q, z)\n im_quant = lut[(y*(BITS - 1)).astype(np.uint8)].astype(np.uint8) # calculate quantized image.\n im_quant = gray2rgb(rgb, im_quant, im_yiq) # if the original image was RGB then convert 
back to RGB\n return im_quant, error", "def iterate(self, iteration_loops, print_every_n):\n current_policy_function = lambda state_input: random.randint(0,self.nA-1) # Initialize with random policy\n current_Q = None\n iterations_completed = 0\n list_of_Q_dicts = [] # Maintain Qs throughout for comparison later\n while iterations_completed < iteration_loops:\n current_Q = self.policy_evaluator.evaluate(policy_function=current_policy_function, existing_Q=current_Q) # Use policy evaluator to update action-value function\n current_policy_function = self.epsilon_greedily_update_policy(current_Q=current_Q,\n iterations_completed=iterations_completed) # Use new Q function to epsilon greedily get new policy\n if iterations_completed % print_every_n == 0:\n print(\"Completed {} full policy iterations and saved most recent policy\".format(iterations_completed))\n print(current_Q)\n list_of_Q_dicts.append(deepcopy(current_Q))\n iterations_completed += 1\n return list_of_Q_dicts", "def every_n_iters(self, runner: Runner, n: int):\n if runner.iter < self.start_iter:\n return True\n return (runner.iter + 1 - self.start_iter) % n == 0 if n > 0 else False", "def _iterate_steps(self):\n mixture_size = self.parameters['fixed_mixture_size']\n if mixture_size is None:\n return 2 ** self.Ns\n else:\n return scipy.special.comb(self.Ns, mixture_size, exact=True)", "def convergence_processor(self):\n while True:\n rexp = (yield)\n self.converged = True\n self.converged_time = int(rexp.group(2))", "def run(self):\n # print(\"111\"+\"--- %s seconds ---\" % (time.time() ))\n err = self.params.tolerance + 1\n for iter_num in range(self.params.max_iter):\n if err <= self.params.tolerance:\n break\n # print(\"11\"+str(iter_num)+\"--- %s seconds ---\" % (time.time() - start_time))\n qprev = self.sigma2\n\n self._expectation_iter(iter_num)\n self._maximization_iter(iter_num)\n\n if self.sigma2 <= 0:\n self.sigma2 = self.params.tolerance / 10\n err = np.abs(self.sigma2 - qprev)\n\n if callable(self.callback):\n kwargs = {\n 'iteration': iter_num,\n 'error': err,\n 'X': self.X,\n 'Y': self.TY,\n 'W': self.W,\n 'P': self.P\n }\n self.callback(**kwargs)\n return self.TY", "def iterations(self):\n i = 0\n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n n = len(self.model.stateVector)\n self.answer = None\n \n while ((i < self.maxiter) \n and (stateVectorConv > self.stateVectorConvThreshold)\n ):\n \n F, K = self.model()\n \n if np.any(np.isnan(F)) or np.any(np.isnan(K)):\n m = \"Iteration {0} failure of model.\"\n raise OptimalEstimationException(m.format(i))\n \n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n m = \"Iteration {0} failure in decomposition.\"\n raise OptimalEstimationException(m.format(i))\n \n statevectorOffset = (self.V.T * self.priorSinvh * \n np.matrix(np.array(self.model.stateVector) - np.array(self.model.prior) ).T)\n measurementOffset = (self.U.T * self.errSinvh * \n np.matrix(self.model.observation - F).T)\n \n newState = np.matrix((self.w * \n (measurementOffset.A1 + \n self.w * statevectorOffset.A1))/(self.w**2+1.0)).T\n newState = self.priorSh * self.V * newState\n newState = newState.A1 + self.model.prior\n \n stateVectorConv = ((np.matrix(newState - self.model.stateVector) * \n self.Sinv * np.matrix(newState - self.model.stateVector).T)/n)[0,0]\n self.model.stateVector = newState\n\n if i == 0:\n \n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n \n print('cost Function for iteration 
{}:'.format(i), self.costFunction)\n\n i += 1\n \n F, K = self.model()\n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n raise OptimalEstimationException(\"Failure in decomposition.\")\n \n Wplus2 = np.matrix(np.diag(1.0/(self.w**2+1.0)))\n self.model.covariance = (self.priorSh * self.V * Wplus2 * \n self.V.T * self.priorSh)\n \n\n \n return i, stateVectorConv", "def algorithm(self):\n convergence_threshold = 50\n reward_num_threshold = 300\n alpha = 1\n gamma = 0.5\n while (self.reward_num < reward_num_threshold) and (self.count<convergence_threshold):\n print('------')\n print('Iteration', self.reward_num, '/', reward_num_threshold)\n print('Iterations w/out Q-update:', self.count, '/', convergence_threshold)\n # select a possible action (any of them; all are valid)\n s = self.get_state_num()\n print(\"Initial state:\", s)\n a = random.choice(np.arange(3))\n self.apply_action(a)\n while self.reward == None:\n #print(\"Sleeping to wait for reward\")\n rospy.sleep(0.5)\n reward = self.reward\n print(\"REWARD =\", reward)\n self.reward = None\n if reward == 0:\n next_state = self.get_state_num()\n mx = np.amax(self.Q[next_state])\n else:\n ## There is no next state if nonzero reward seen\n mx = 0\n update = self.Q[s][a] + alpha*(reward+gamma*mx-self.Q[s][a])\n if self.Q[s][a] != update:\n print(\"Update Q matrix\")\n self.Q[s][a] = update\n self.count = 0\n else:\n self.count += 1\n\n print(\"Finished calculating Q-Matrix\\n\\n\\n\\n\\n\\n\\n\")", "def quantize(im_orig, n_quant, n_iter):\n color_flag = False\n image = im_orig\n error = list()\n\n\n\n if len(im_orig.shape) == 3: #RGB image\n color_flag = True\n y_im = rgb2yiq(im_orig)\n image = y_im[:, :, 0]\n\n if np.all(image <= 1):\n image *= NORMALIZE\n my_hist, bins = np.histogram(image, 256, (0,255))\n hist_cum = np.cumsum(my_hist)\n\n\n\n z_array = np.array([0]*(n_quant+1)) #init the z_array\n z_array[0] = 0 #minimal value\n z_array[-1] = 255 #maximal value\n\n q_array = np.zeros(n_quant) #init the q_array\n pixel_per_z = (hist_cum[-1] / n_quant)\n\n\n\n\n for i in range(1, n_quant): #Getting the z_array (not optimal)\n z_array[i] =np.argwhere(hist_cum>=(pixel_per_z*i)).astype(np.uint8)[0][0] #first element to be true\n\n g = np.arange(256)\n\n for index in range(n_iter):\n z_copy = z_array.copy()\n\n errors_per_iter = np.zeros(n_quant)\n\n for i in range(n_quant): #q calculation\n start = (z_array[i])+1\n end = (z_array[i+1] + 1)\n hist_work = my_hist[start:end]\n g_work = g[start:end]\n sum_up = np.sum(g_work * hist_work) # g*hist\n sum_down = np.sum(hist_work)\n if sum_down!=0:\n q_array[i] = sum_up/sum_down\n else:\n q_array[i] = 0\n\n for i in range(n_quant): # error calculating after optimisation of z\n start = int(z_array[i])+1\n end = int(z_array[i + 1]) + 1\n err = np.sum(((np.around(q_array[i]) - g[start:end]) ** 2) * my_hist[start:end])\n errors_per_iter[i] = err\n error.append(np.sum(errors_per_iter))\n\n for i in range(1, n_quant): #First and last element already defined\n z_array[i] = ((q_array[i-1]) + (q_array[i])) / 2 #optimization of the z parts\n\n if np.array_equal(z_array, z_copy):\n break\n\n\n\n\n\n\n look_up_table = np.array([]) #create look up table\n look_up_table = np.append(look_up_table, [q_array[0]])\n\n for i in range(1, 1 + n_quant):\n num = q_array[i-1]\n array_use = np.array([num] * int(z_array[i] - z_array[i-1]))\n temp_array = np.append(look_up_table, array_use) #fill the look up table\n look_up_table = 
temp_array\n\n look_up_table = np.append(look_up_table, [q_array[-1]])\n\n im_quant = look_up_table[image.astype(np.uint8)]\n im_quant /= NORMALIZE\n\n if color_flag:\n y_im[:, :, 0] = im_quant\n im_quant = yiq2rgb(y_im)\n\n return [im_quant, error]", "def value_iteration(vision, n_tongs):\n q_value = np.zeros([11, 11, 4, 3], dtype=np.float32)\n for _ in range(1000):\n delta = 0\n for px in range(11):\n for py in range(11):\n item = recognize_item(px, py, vision)\n # Not update for locations collecting items, to avoid endless loops.\n if (item == 'diamond' and n_tongs > 0) or item == 'tongs' or item == 'jellybean':\n continue\n q_new = np.zeros([4, 3], dtype=np.float32)\n for d in range(4):\n for a in range(3):\n px_next, py_next, d_next, r = simulate_step(px, py, d, a, vision, n_tongs)\n q_new[d, a] = r\n if 0 <= px_next < 11 and 0 <= py_next < 11: # next position in vision\n q_new[d, a] += np.max(q_value[px_next, py_next, d_next])\n delta = max(delta, norm(q_new - q_value[px, py], ord=np.inf))\n q_value[px, py] = q_new\n if delta < 1e-3:\n break\n return q_value", "def monte_carlo_trials(nb_trials, nb_ok, lock):\n\n # First perform the trials\n # Do not use shared resource because other processes doesn't need to know\n # about computation step\n nb_in_quarter_results = 0\n for i in range(nb_trials):\n x = random.uniform(0, 1)\n y = random.uniform(0, 1)\n if x * x + y * y <= 1.0:\n nb_in_quarter_results += 1\n\n # Finally update shared resource\n # Do it only once, then processes doesn't struggle with each other to\n # update it\n with lock:\n nb_ok.value += nb_in_quarter_results", "def quantize(im_orig, n_quant, n_iter):\n img = get_gray_channel(im_orig)\n img = float2int(img)\n\n hist, bins = np.histogram(img, bins=np.arange(MAX_VALUE + 1))\n hist_times_color = hist * np.arange(MAX_VALUE)\n z = guess_first_z(n_quant, hist)\n error, q = [], []\n for i in range(0, n_iter):\n q = calculate_q(z, hist, hist_times_color)\n\n new_z = [0]\n for j in range(1, n_quant):\n new_z.append((q[j - 1] + q[j]) // 2)\n new_z.append(MAX_VALUE - 1)\n\n error.append(calculate_error(hist, new_z, q))\n\n if np.array_equal(z, new_z):\n break\n z = new_z\n\n lut = np.zeros(MAX_VALUE)\n for i in range(0, len(q)):\n lut[z[i]:z[i + 1]] = q[i]\n lut[MAX_VALUE - 1] = q[len(q) - 1]\n \n im_quant = lut[img.astype(int)]\n im_quant = int2float(im_quant)\n im_quant = update_gray_channel(im_orig, im_quant)\n \n return im_quant, error", "def realTryHard(g, n, verbose=False, graphname=\"\"):\r\n tot = None\r\n for i in range(n):\r\n # print(\"Starting batch \", i)\r\n res = tryEverything(g, verbose, graphname)\r\n if tot is None:\r\n tot = res\r\n else:\r\n for j in res:\r\n tot[j][1] += res[j][1]\r\n for j in tot:\r\n tot[j][1] /= n\r\n tot[j][1] = round(tot[j][1], 3)\r\n return tot", "def test_poisson_stretch(self):\n\n for n in range(0, 10):\n expect = 5 * random.random()\n poissonvar = poisson_stretch(n, expect)\n for k in range(1, n + 1):\n self.assertTrue(math.isclose(poissonvar[k] / poissonvar[0],\n expect ** k / math.factorial((k)),\n rel_tol=1e-05,\n abs_tol=1.0))", "def repeat_expt(epsilon, gamma,\n result_nonprivate, \n repetitions=10,\n outfile_singles=None, outfile_aggregates=None, \n data_blocker=1, windsorized=False):\n \n \n blocker = gupt.GuptRunTime.get_data_blockers()[data_blocker-1]\n # 1 NaiveDataBlocker\n # 2 ResamplingDataBlockerConstantSize \n # 3 ResamplingDataBlockerConstantBlocks\n\n if not windsorized:\n DP_mode=\"standard_DP\"\n else:\n DP_mode=\"windsorized_DP\"\n\n logger.info(\"Running %d 
repetitions with data_blocker=%s\" % (repetitions, blocker))\n logger.info(\"epsilon=%s gamma=%s, in mode %s\" % (epsilon, gamma, DP_mode))\n \n results, starttime = [], time.clock()\n \n # results = pickle.load( open( \"res.pickle\", \"rb\" ))\n \n \n for i in range(repetitions):\n\n # TODO: Perhaps they DO or DO NOT have to be recreated in each run?\n blocker = gupt.GuptRunTime.get_data_blockers()[data_blocker-1]\n reader = censusdatadriver.get_reader()\n runtime = gupt.GuptRunTime(MeanComputer, reader, epsilon, \n blocker_name=blocker, blocker_args=gamma)\n # end TODO\n\n if not windsorized:\n res=runtime.start()\n else:\n res=runtime.start_windsorized()\n \n # artificial 2nd dimension, just for testing these routines:\n # res = res + res\n \n print report_results(res, result_nonprivate, DP_mode, blocker, \n epsilon, gamma, outfile_singles)\n sleep_short()\n \n results.append(res)\n\n # pickle.dump(results, open( \"res.pickle\", \"wb\" ) )\n \n \n duration = time.clock() - starttime\n logger.info(\"%d repetitions took %.2f seconds\" % (repetitions, duration))\n \n mean, std = analyze_results(results) # , result_nonprivate)\n \n print report_results_repeated(mean, std, DP_mode, blocker,\n epsilon, gamma, repetitions,\n outfile=outfile_aggregates)", "def iterate(self, num, nsimga_start=None, nsigma_step=0.01, max_iter=1000):\n if nsimga_start is None:\n nsimga_start = self._nsigma\n len_codes = 0\n step = 0\n while len_codes < num and step < max_iter:\n self.generate(nsimga_start)\n len_codes = len(self._result)\n nsimga_start -= nsigma_step\n step += 1\n print(\"Final nsigma: \", nsimga_start)\n print(\"Iterations : \", step)", "def train_q(n=1000):\n for i in range(50):\n p1_strategy = strategies.QStrategy('X')\n p2_strategy = strategies.QStrategy('O')\n p1 = player.Player('X', p1_strategy)\n p2 = player.Player('O', p2_strategy)\n board = tictactoe.Board()\n game = rl_game.Game(p1, p2, board)\n game.play_many(n)\n p1.strategy.save_q()\n p2.strategy.save_q()", "def iterations(self, n_iter):\n self.n_iter = n_iter", "def iterations(self, n_iter):\n self.n_iter = n_iter", "def iterations(self, n_iter):\n self.n_iter = n_iter", "def iterations(self, n_iter):\n self.n_iter = n_iter", "def factorial_loop(n):\n\n pass # @todo -fix this", "def random_v_q(n=1000):\n for i in range(5):\n p1_strategy = strategies.RandomStrategy()\n p2_strategy = strategies.QStrategy('O')\n p1 = player.Player('X', p1_strategy)\n p2 = player.Player('O', p2_strategy)\n board = tictactoe.Board()\n game = rl_game.Game(p1, p2, board)\n game.play_many(n)\n #p2.strategy.save_q()\n # Do not save gameplay against random opponent", "def iterate(self,N = None):\n result = self.iterate_loop(N)\n #self.writeToFile()\n\n #TODO: We need a timeout for really long executions, but it won't work because it opens another blender instance! Fix this!\n \"\"\"\n queue = multiprocessing.Queue(1) # Maximum size is 1\n proc = multiprocessing.Process(target=self.iterate_wrapper, args=(self, queue, N))\n proc.start()\n\n # Wait for TIMEOUT seconds\n try:\n result = queue.get(True, TIMEOUT)\n except Queue.Empty:\n # Deal with lack of data somehow\n result = None\n print(\"TIMEOUT reached! 
The pString is too long!\")\n finally:\n proc.terminate()\n \"\"\"\n return result", "def q_v_random(n=1000):\n for i in range(5):\n p1_strategy = strategies.QStrategy('X')\n p2_strategy = strategies.RandomStrategy()\n p1 = player.Player('X', p1_strategy)\n p2 = player.Player('O', p2_strategy)\n board = tictactoe.Board()\n game = rl_game.Game(p1, p2, board)\n game.play_many(n)\n #p1.strategy.save_q()\n # Do not save gameplay against random opponent", "def algorithm_loop(self):", "def iteration(self):\n T = self.generate_T()\n R = self.reproduce(T)\n self.P = self.choose_mi_best(R)\n #print(self.P)", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n counter = 0\n term1 = 3\n term2 = 2\n term3 = 1\n loop = n-3\n\n if n<=3:\n return n\n\n while counter<loop:\n term1,term2,term3=term1+2*term2+3*term3,term1,term2\n counter +=1\n return term1" ]
[ "0.62470347", "0.62403727", "0.6138393", "0.61302924", "0.61291933", "0.5994922", "0.5904798", "0.5871222", "0.5869976", "0.5865474", "0.58552843", "0.58093476", "0.57621", "0.5730274", "0.5709154", "0.56889963", "0.56818104", "0.5676366", "0.5656434", "0.563972", "0.563972", "0.563972", "0.563972", "0.56122184", "0.55561143", "0.5554762", "0.55349356", "0.5529178", "0.5518284", "0.5502769" ]
0.6888727
0
performs optimal quantization of a given grayscale or RGB image
def quantize(im_orig, n_quant, n_iter):
    shape_len = len(im_orig.shape)
    if shape_len == 2:  # grayscale
        return quantization_helper(im_orig, n_quant, n_iter)
    elif shape_len == 3:  # rgb
        im_yiq = rgb2yiq(im_orig)
        y = im_yiq[:, :, 0]
        y_quant, error = quantization_helper(y, n_quant, n_iter)
        y_quant = y_quant / 255
        im_yiq[:, :, 0] = y_quant
        im_quants = yiq2rgb(im_yiq)
        return im_quants, error
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quantize(im_orig, n_quant, n_iter):\n rgb, y, im_yiq = check_rgb(im_orig)\n hist_orig, bin_edges = np.histogram(y*(BITS - 1), BITS, (0, BITS - 1))\n z = np.concatenate([[0], initial_z(hist_orig, n_quant), [(BITS-1)]])\n q = find_q(z, hist_orig)\n error = [calc_err(q, z, hist_orig)]\n for i in range(n_iter - 1): # first iteration was already done\n z_new = find_z(q)\n if np.array_equal(z_new, z): # stop if the z and q vectors are already optimal\n break\n q = find_q(z_new, hist_orig)\n z = z_new\n error.append(calc_err(q, z, hist_orig))\n lut = im_lut(q, z)\n im_quant = lut[(y*(BITS - 1)).astype(np.uint8)].astype(np.uint8) # calculate quantized image.\n im_quant = gray2rgb(rgb, im_quant, im_yiq) # if the original image was RGB then convert back to RGB\n return im_quant, error", "def quantize(im_orig, n_quant, n_iter):\n img = get_gray_channel(im_orig)\n img = float2int(img)\n\n hist, bins = np.histogram(img, bins=np.arange(MAX_VALUE + 1))\n hist_times_color = hist * np.arange(MAX_VALUE)\n z = guess_first_z(n_quant, hist)\n error, q = [], []\n for i in range(0, n_iter):\n q = calculate_q(z, hist, hist_times_color)\n\n new_z = [0]\n for j in range(1, n_quant):\n new_z.append((q[j - 1] + q[j]) // 2)\n new_z.append(MAX_VALUE - 1)\n\n error.append(calculate_error(hist, new_z, q))\n\n if np.array_equal(z, new_z):\n break\n z = new_z\n\n lut = np.zeros(MAX_VALUE)\n for i in range(0, len(q)):\n lut[z[i]:z[i + 1]] = q[i]\n lut[MAX_VALUE - 1] = q[len(q) - 1]\n \n im_quant = lut[img.astype(int)]\n im_quant = int2float(im_quant)\n im_quant = update_gray_channel(im_orig, im_quant)\n \n return im_quant, error", "def quantize(im_orig, n_quant, n_iter):\n color_flag = False\n image = im_orig\n error = list()\n\n\n\n if len(im_orig.shape) == 3: #RGB image\n color_flag = True\n y_im = rgb2yiq(im_orig)\n image = y_im[:, :, 0]\n\n if np.all(image <= 1):\n image *= NORMALIZE\n my_hist, bins = np.histogram(image, 256, (0,255))\n hist_cum = np.cumsum(my_hist)\n\n\n\n z_array = np.array([0]*(n_quant+1)) #init the z_array\n z_array[0] = 0 #minimal value\n z_array[-1] = 255 #maximal value\n\n q_array = np.zeros(n_quant) #init the q_array\n pixel_per_z = (hist_cum[-1] / n_quant)\n\n\n\n\n for i in range(1, n_quant): #Getting the z_array (not optimal)\n z_array[i] =np.argwhere(hist_cum>=(pixel_per_z*i)).astype(np.uint8)[0][0] #first element to be true\n\n g = np.arange(256)\n\n for index in range(n_iter):\n z_copy = z_array.copy()\n\n errors_per_iter = np.zeros(n_quant)\n\n for i in range(n_quant): #q calculation\n start = (z_array[i])+1\n end = (z_array[i+1] + 1)\n hist_work = my_hist[start:end]\n g_work = g[start:end]\n sum_up = np.sum(g_work * hist_work) # g*hist\n sum_down = np.sum(hist_work)\n if sum_down!=0:\n q_array[i] = sum_up/sum_down\n else:\n q_array[i] = 0\n\n for i in range(n_quant): # error calculating after optimisation of z\n start = int(z_array[i])+1\n end = int(z_array[i + 1]) + 1\n err = np.sum(((np.around(q_array[i]) - g[start:end]) ** 2) * my_hist[start:end])\n errors_per_iter[i] = err\n error.append(np.sum(errors_per_iter))\n\n for i in range(1, n_quant): #First and last element already defined\n z_array[i] = ((q_array[i-1]) + (q_array[i])) / 2 #optimization of the z parts\n\n if np.array_equal(z_array, z_copy):\n break\n\n\n\n\n\n\n look_up_table = np.array([]) #create look up table\n look_up_table = np.append(look_up_table, [q_array[0]])\n\n for i in range(1, 1 + n_quant):\n num = q_array[i-1]\n array_use = np.array([num] * int(z_array[i] - z_array[i-1]))\n temp_array = 
np.append(look_up_table, array_use) #fill the look up table\n look_up_table = temp_array\n\n look_up_table = np.append(look_up_table, [q_array[-1]])\n\n im_quant = look_up_table[image.astype(np.uint8)]\n im_quant /= NORMALIZE\n\n if color_flag:\n y_im[:, :, 0] = im_quant\n im_quant = yiq2rgb(y_im)\n\n return [im_quant, error]", "def image_quality(img):\n # convert bgr image to gray -> float32\n score = 0.0\n if img is None:\n return score\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n x = gray.astype(np.float32)\n h, w = x.shape[0], x.shape[1]\n\n # horizontal\n d_h = x[:,1:] - x[:,:-1]\n w_bound = int(8*(np.floor(w/8.0)-1)) + 1\n B_h = np.mean(np.abs(d_h[:,7:w_bound:8]))\n A_h = (8.0 * np.mean(np.abs(d_h)) - B_h) / 7.0\n sig_h = np.sign(d_h)\n left_sig, right_sig = sig_h[:,:-2], sig_h[:,1:-1]\n Z_h = np.mean((left_sig * right_sig)<0)\n\n # vertical\n d_v = x[1:, :] - x[:-1, :]\n h_bound = int(8*(np.floor(h/8.0)-1)) + 1\n B_v = np.mean(np.abs(d_v[7:h_bound:8, :]))\n A_v = (8.0 * np.mean(np.abs(d_v)) - B_v) / 7.0\n sig_v = np.sign(d_v)\n up_sig, down_sig = sig_v[:-2, :], sig_v[1:-1, :]\n Z_v = np.mean((up_sig * down_sig)<0)\n\n # combine the weights\n B = (B_h + B_v)/2.0\n A = (A_h + A_v)/2.0\n Z = (Z_h + Z_v)/2.0\n\n # quality prediction\n alpha = -245.8909\n beta = 261.9373\n gamma1 = -239.8886 / 10000.0 \n gamma2 = 160.1664 / 10000.0 \n gamma3 = 64.2859 / 10000.0 \n\n # corner case of a black / white frame\n if np.abs(A) < 1e-3 or np.abs(B) < 1e-3 or np.abs(Z) < 1e-3:\n score = 0.0\n else:\n score = alpha + beta*(B**gamma1)*(A**gamma2)*(Z**gamma3)\n\n return score", "def quantize(im_orig, n_quant, n_iter):\n if im_orig.ndim == RGB_DIM:\n # Quantize only Y channel.\n im_yiq = rgb2yiq(im_orig)\n result = __gray_quantize(im_yiq[:, :, Y_CHANNEL], n_quant, n_iter)\n im_yiq[:, :, Y_CHANNEL] = result[IMAGE_LOCATION]\n\n # Convert back to RGB space.\n result[IMAGE_LOCATION] = yiq2rgb(im_yiq)\n return result\n # Otherwise, just quantize.\n return __gray_quantize(im_orig, n_quant, n_iter)", "def quantizeColor(bilateralFilter_img, a, ksize):\n medianBlur_img = cv2.medianBlur(bilateralFilter_img,ksize)\n [rows,cols,c] = medianBlur_img.shape\n quantizeColor_img = medianBlur_img\n for i in xrange(0,rows):\n for j in xrange(0,cols):\n pixel_b = medianBlur_img.item(i,j,0)\n pixel_g = medianBlur_img.item(i,j,1)\n pixel_r = medianBlur_img.item(i,j,2) \n pixel_b = math.floor(pixel_b/a)*a \n pixel_g = math.floor(pixel_g/a)*a\n pixel_r = math.floor(pixel_r/a)*a\n quantizeColor_img.itemset((i,j,0),pixel_b)\n quantizeColor_img.itemset((i,j,1),pixel_g)\n quantizeColor_img.itemset((i,j,2),pixel_r)\n\n return quantizeColor_img", "def quantization_helper(im, n_quant, n_iter):\n im *= (255 / im.max())\n hist, bins = np.histogram(im, bins=256, range=[0, 256])\n cumulative_hist = np.cumsum(hist)\n # initial division such that each segment will contain approximately the same number of pixels.\n num_of_pixels = cumulative_hist.max() / n_quant\n z = np.zeros(shape=n_quant + 1, dtype='int')\n for i in range(0, len(z) - 1):\n z[i] = np.argmin(np.absolute(cumulative_hist - num_of_pixels * (i)))\n\n z[len(z) - 1] = 255 # The first and last elements are 0 and 255 respectively.\n q = np.zeros(shape=n_quant, dtype='float64')\n\n z, q, error = perform_quantization_loop(z, q, n_iter, hist, bins)\n lookup_table = np.array([0]*256,dtype='float64')\n\n for i in range(n_quant):\n lookup_table[z[i]:z[i+1]] = q[i]\n\n im_quant = lookup_table[im.astype(int)]\n return im_quant, error", "def kmeansQuantise(image, 
quantisation_levels=64):\n K = quantisation_levels\n N_ATTEMPTS = 10\n MAX_ITER = 300\n TOL = 0.0001\n\n km = KMeans(\n n_clusters=K, n_init=N_ATTEMPTS, max_iter=MAX_ITER, tol=TOL,\n n_jobs=-1\n )\n\n z = np.float32(image.reshape(-1, 3))\n\n labels = km.fit_predict(z)\n centres = km.cluster_centers_\n\n res = centres[labels]/(256/K)\n res = res.astype(np.uint8)\n image_q = res.reshape((IMG_RES_M, IMG_RES_N, 3))\n\n return image_q", "def quantise(images, q_levels):\n return (np.digitize(images, np.arange(q_levels) / q_levels) - 1).astype('float32')", "def quantise(images, q_levels):\n return (np.digitize(images, np.arange(q_levels) / q_levels) - 1).astype('float32')", "def quantise(images, q_levels):\n return (np.digitize(images, np.arange(q_levels) / q_levels) - 1).astype('float32')", "def quantization(image, x_bins, y_bins, h_bins, s_bins, v_bins, delta = 1e-6):\r\n\r\n\tassert image.ndim == 3\r\n\r\n\th, w = image.shape[0], image.shape[1]\r\n\r\n\timage = rgb_to_hsv(image / 255)\r\n\r\n\timage_list = image.tolist()\r\n\timage_list = [j for i in image_list for j in i]\r\n\r\n\t## generate the list of coordinates\r\n\t## note: the coordinates are 0-index based here\r\n\tcoordinates = [[i, j] for i in range(h) for j in range(w)]\r\n\r\n\t## merge hsv with coordinates\r\n\timage_list = list(zip(coordinates, image_list))\r\n\timage_list = [i[0] + i[1] for i in image_list]\r\n\r\n\t## normalize the coordinates\r\n\timage_list = [[i[0] / (h + delta - 1), i[1] / (w + delta - 1), i[2], i[3], i[4]] for i in image_list]\r\n\r\n\r\n\t## create the bins for quantization\r\n\tx_bin_array = np.linspace(0, 1, x_bins + 1)\r\n\ty_bin_array = np.linspace(0, 1, y_bins + 1)\r\n\th_bin_array = np.linspace(0, 1, h_bins + 1)\r\n\ts_bin_array = np.linspace(0, 1, s_bins + 1)\r\n\tv_bin_array = np.linspace(0, 1, v_bins + 1)\r\n\r\n\t## quantization of the values\r\n\tquantized_list = [(x_bin_array[np.digitize(np.abs(i[0] - delta), x_bin_array) - 1],\r\n\t\t\t\t\t y_bin_array[np.digitize(np.abs(i[1] - delta), y_bin_array) - 1],\r\n\t\t\t\t\t h_bin_array[np.digitize(np.abs(i[2] - delta), h_bin_array) - 1],\r\n\t\t\t\t\t s_bin_array[np.digitize(np.abs(i[3] - delta), s_bin_array) - 1],\r\n\t\t\t\t\t v_bin_array[np.digitize(np.abs(i[4] - delta), v_bin_array) - 1]) for i in image_list]\r\n\r\n\treturn quantized_list", "def quantize(image_patch, gray_levels=12, n_stddev=2):\n # compute gray level gaussian stats\n mean = np.mean(image_patch)\n stddev = np.std(image_patch)\n # logger.debug('mean: {!s}\\nstd dev: {!s}'.format(mean, stddev))\n bin_width = 2*n_stddev*stddev / (gray_levels-2)\n # logger.debug('bin_width: {!s}'.format(bin_width))\n\n # rebin values into new quanization, first and last bins hold outliers\n quantized_image_patch = np.zeros_like(image_patch, dtype=np.int8)\n it = np.nditer(image_patch, op_flags=['readwrite'], flags=['multi_index'])\n while not it.finished:\n val = image_patch[it.multi_index]\n quantized_image_patch[it.multi_index] = min(gray_levels-1, max(0, math.floor(((val - mean + n_stddev*stddev)/(bin_width+1e-9))+1)))\n it.iternext()\n\n # import matplotlib.pyplot as plt\n # xy_shape = quantized_image_patch.shape[1:]\n # for z in range(quantized_image_patch.shape[0]):\n # fig = plt.figure()\n # ax = fig.add_subplot(1,2,1)\n # ax.imshow(image_patch[z,:,:].reshape(xy_shape), cmap='gray')\n # ax = fig.add_subplot(1,2,2)\n # ax.imshow(quantized_image_patch[z,:,:].reshape(xy_shape), cmap='gray', vmin=0, vmax=gray_levels-1)\n # plt.show()\n return quantized_image_patch", "def 
reduceImage(img,N,M,n,m):\n scaleN = int(n/(2*N))\n scaleM = int(m/(2*M))\n imgR = np.zeros((2*N+1,2*M+1))\n for i in range(2*N+1):\n for j in range(2*M+1):\n if img[i*scaleN+2,j*scaleM+2,3] != 255:\n imgR[i,j] = 0.\n else: \n imgR[i,j] = 1.\n return imgR", "def quantizeImage(imgOrig:np.ndarray, nQuant:int, nIter:int)->(List[np.ndarray],List[float]):\r\n if isRGB(imgOrig):\r\n RGB=True\r\n imgYIQ = transformRGB2YIQ(imgOrig)\r\n Y=imgYIQ[:,:,0]\r\n unnormImg = unnormalize(Y).astype('int')\r\n else:\r\n RGB = False\r\n unnormImg = unnormalize(imgOrig).astype('int')\r\n\r\n img_lst=[imgOrig]\r\n err_lst=[]\r\n histOrig = calHist(unnormImg)\r\n h,w = unnormImg.shape[:2]\r\n\r\n partSize = (h* w) / nQuant\r\n z = [1]\r\n sum = 0\r\n for i in range(len(histOrig)):\r\n sum+=histOrig[i]\r\n if (sum>=partSize):\r\n z.append(i)\r\n sum=0\r\n\r\n z.append(255)\r\n\r\n for i in range(nIter):\r\n q = []\r\n for i in range(1,nQuant+1):\r\n cutHist=histOrig[z[i-1]:z[i]]\r\n avg=int(np.average(range(z[i-1], z[i]),axis=None, weights=cutHist, returned=False))\r\n q.append(avg)\r\n for i in range(1,nQuant):\r\n z[i]=int((q[i-1]+q[i])/2)\r\n\r\n img=np.zeros(unnormImg.shape)\r\n for i in range(0, nQuant):\r\n img[unnormImg>=z[i]]=q[i]\r\n errMat=pow((unnormImg-img),2)/(h*w)\r\n err=np.average(errMat)\r\n err_lst.append(err)\r\n\r\n\r\n if RGB:\r\n img = normalize(img)\r\n imgYIQ[:, :, 0] = img\r\n img = transformYIQ2RGB(imgYIQ)\r\n\r\n img_lst.append(img)\r\n\r\n\r\n return img_lst, err_lst", "def execute(self, image: sitk.Image, params: pymia_fltr.FilterParams = None) -> sitk.Image:\n\n return sitk.RescaleIntensity(image, self.min_intensity, self.max_intensity)", "def quantizeRGB(origImg: np.ndarray, k: int) -> np.ndarray:\n\n ######################################################################################\n ## TODO: YOUR CODE GOES HERE ##\n ######################################################################################\n\n h, w = origImg.shape[0], origImg.shape[1]\n flat_img = origImg.reshape(h*w, 3)\n quantizedImg = np.zeros_like(flat_img)\n\n km = KMeans(n_clusters=k, random_state=101)\n km.fit_predict(flat_img)\n clusterCenterColors = km.cluster_centers_\n labels = km.labels_\n for i in range(len(labels)):\n quantizedImg[i] = clusterCenterColors[labels[i]]\n\n quantizedImg = np.floor(quantizedImg.reshape(h, w, 3)).astype(int)\n\n return quantizedImg, clusterCenterColors\n\n ######################################################################################\n ## YOUR CODE ENDS HERE ##\n ######################################################################################", "def do_classify(img,mask,n_sigmas,multichannel,intensity,edges,texture,sigma_min,sigma_max, downsample_value):\n if np.ndim(img)==3:\n features = extract_features(\n img,\n n_sigmas,\n multichannel=multichannel,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n else:\n features = extract_features(\n np.dstack((img,img,img)),\n n_sigmas,\n multichannel=multichannel,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n\n if mask is None:\n raise ValueError(\"If no classifier clf is passed, you must specify a mask.\")\n training_data = features[:, mask > 0].T\n\n training_data = memmap_feats(training_data)\n\n training_labels = mask[mask > 0].ravel()\n\n training_data = training_data[::downsample_value]\n training_labels = training_labels[::downsample_value]\n\n lim_samples = 100000 #200000\n\n if 
training_data.shape[0]>lim_samples:\n logging.info('Number of samples exceeds %i'% lim_samples)\n ind = np.round(np.linspace(0,training_data.shape[0]-1,lim_samples)).astype('int')\n training_data = training_data[ind,:]\n training_labels = training_labels[ind]\n logging.info('Samples have been subsampled')\n logging.info('Number of samples in training data: %i' % (training_data.shape[0]))\n print(training_data.shape)\n\n clf = make_pipeline(\n StandardScaler(),\n MLPClassifier(\n solver='adam', alpha=1, random_state=1, max_iter=2000,\n early_stopping=True, hidden_layer_sizes=[100, 60],\n ))\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Initializing MLP model')\n\n clf.fit(training_data, training_labels)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('MLP model fit to data')\n\n del training_data, training_labels\n\n logging.info('Create and memory map model input data')\n\n data = features[:, mask == 0].T\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n data = memmap_feats(data)\n logging.info('Memory mapped model input data')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n labels = clf.predict(data)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Model used on data to estimate labels')\n\n if mask is None:\n result = labels.reshape(img.shape[:2])\n result2 = result.copy()\n else:\n result = np.copy(mask)#+1\n result[mask == 0] = labels\n del labels, mask\n result2 = result.copy()\n del result\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('RF feature extraction and model fitting complete')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return result2", "def change_resolution(img):\n scale_factor = np.random.choice(list(range(0, 6, 2)))\n if scale_factor == 0:\n return img\n downsample = nn.AvgPool2d(scale_factor)\n upsample = nn.UpsamplingNearest2d(scale_factor=scale_factor)\n new_res_img = upsample(downsample(img.unsqueeze(dim=1))).squeeze()\n return new_res_img", "def computeQuantizationError(origImg: np.ndarray, quantizedImg: np.ndarray) -> int:\n ######################################################################################\n ## TODO: YOUR CODE GOES HERE ##\n ######################################################################################\n\n quantizationError = np.sum(np.square(origImg - quantizedImg))\n\n ######################################################################################\n ## YOUR CODE ENDS HERE ##\n ######################################################################################\n return quantizationError", "def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, 
left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))", "def _resize_algorithm(img, dim):\n\n if img is None:\n rimg = Image.BILINEAR\n elif img.size[0] > dim[0] or img.size[1] > dim[1]:\n rimg = Image.ANTIALIAS\n else:\n rimg = Image.BILINEAR\n\n return rimg", "def __call__(self, img: torch.Tensor) -> torch.Tensor:\n img_np: np.ndarray = img.detach().cpu().numpy()\n quantile: float = min(self.num_pixels / img_np.size, 1) \\\n if self.num_pixels is not None else self.quantile\n thresh: float = np.quantile(img_np, 1 - quantile)\n img = (img > thresh).float()\n return img", "def post_process_image(self, image):\r\n A = np.min(image)\r\n B = np.max(image)\r\n k = 255\r\n R, C = image.shape\r\n for i in range(R):\r\n for j in range(C):\r\n image[i][j] = (k / (B-A)) * (image[i][j] - A)\r\n average_image = np.average(image)\r\n if average_image > 50:\r\n return image.astype(dtype='uint8')\r\n else:\r\n return (255-image).astype(dtype='uint8')", "def create_signal_which_maximizes_activation(model, layer, filt, input_size,\n lr=0.1, opt_steps=100,\n upscaling_steps=5,\n upscaling_factor=2.0,\n color='black'):\n mpl.rcParams['text.color'] = color\n mpl.rcParams['axes.labelcolor'] = color\n mpl.rcParams['xtick.color'] = color\n mpl.rcParams['ytick.color'] = color\n\n img_var = torch.randn((1, 1, int(input_size * ((1 / upscaling_factor)**upscaling_steps))))\n activations = SaveFeatures(list(model.children())[layer])\n optimizer = torch.optim.Adam(\n [img_var.requires_grad_()], lr=lr, weight_decay=1e-6)\n loss_history = []\n\n for step in range(upscaling_steps + 1):\n for n in range(opt_steps):\n optimizer.zero_grad()\n model(img_var)\n loss = -activations.features[:, filt].mean()\n loss_history.append(loss)\n loss.backward()\n optimizer.step()\n\n if step < upscaling_steps:\n img_var = torch.nn.functional.interpolate(\n img_var, scale_factor=upscaling_factor, mode='linear')\n\n plt.figure(figsize=(20, 4))\n plt.plot(img_var.clone().detach().numpy()[0, 0])\n plt.title(\"Input which maximizes activation of layer: conv_{}, filter: {}\".format(\n layer + 1, filt), fontsize=22)\n plt.show()\n\n return img_var", "def apply_ki(image, accuracy=200, plot_j=False): # TODO: save fig to file?\n if not isinstance(image, np.ndarray):\n image = np.asarray(image)\n # Histogram \n h, bin_edges = np.histogram(image[~np.isnan(image)], bins=accuracy, 
density=True)\n bin_width = bin_edges[1]-bin_edges[0]\n g = np.arange(bin_edges[0]+bin_width/2.0, bin_edges[-1], bin_width)\n g_pos = g - np.min(g);\n g01 = g_pos / np.max(g_pos);\n \n # Cost function and threshold\n c = np.cumsum(h)\n m = np.cumsum(h * g01)\n s = np.cumsum(h * g01**2)\n cb = c[-1] - c\n mb = m[-1] - m\n sb = s[-1] - s\n c[c == 0] = 1e-9\n cb[cb == 0] = 1e-9\n var_f = s/c - (m/c)**2\n if np.any(var_f < 0):\n var_f[var_f < 0] = 0\n sigma_f = np.sqrt(var_f)\n var_b = sb/cb - (mb/cb)**2\n if np.any(var_b < 0):\n var_b[var_b < 0] = 0\n sigma_b = np.sqrt(var_b)\n p = c / c[-1]\n sigma_f[sigma_f == 0] = 1e-9\n sigma_b[sigma_b == 0] = 1e-9\n j = p * np.log(sigma_f) + (1-p)*np.log(sigma_b) - p*np.log(p) - (1-p)*np.log(1-p+1e-9)\n j[~np.isfinite(j)] = np.nan\n idx = np.nanargmin(j)\n t = g[idx]\n # Plot\n if plot_j:\n fig, ax = plt.subplots(2,1)\n ax[0].plot(g, j, color='k')\n ax[0].plot([t, t], [np.nanmin(j), np.nanmax(j)], 'r')\n ax[1].bar(g, h)\n ax[1].plot([t, t], [0, np.nanmax(h)], 'r')\n # Return\n return t", "def scale(img):\n result = [[1 if x > BINARY_THRESHOLD else 0 for x in row] for row in img]\n return result", "def preprocess(self, resized_inputs):\n return (2.0 / 255.0) * resized_inputs - 1.0", "def transform_image_mnist(gray, target_size = (28, 28)):\n # gray\n gray = cv2.cvtColor(gray, cv2.COLOR_RGB2GRAY)\n _save_img_file(\"outputs/test_1_gray.png\", gray)\n\n # invert\n gray = 255-gray\n _save_img_file(\"outputs/test_1_gray_invert.png\", gray)\n \n # rescale it\n gray = cv2.resize(gray, target_size)\n _save_img_file('outputs/test_2_rescale.png',gray)\n\n # better black and white version\n gray = threshold(gray, \"mean\")\n _save_img_file('outputs/test_3_thresh.png',gray)\n\n while np.sum(gray[0]) == 0:\n gray = gray[1:]\n\n while np.sum(gray[:,0]) == 0:\n gray = np.delete(gray,0,1)\n\n while np.sum(gray[-1]) == 0:\n gray = gray[:-1]\n\n while np.sum(gray[:,-1]) == 0:\n gray = np.delete(gray,-1,1)\n\n _save_img_file('outputs/test_4.png',gray)\n #print(gray.shape)\n rows,cols = gray.shape\n\n if rows > cols:\n factor = 20.0/rows\n rows = 20\n cols = int(round(cols * factor))\n # first cols than rows\n gray = cv2.resize(gray, (cols, rows))\n else:\n factor = 20.0/cols\n cols = 20\n rows = int(round(rows * factor))\n # first cols than rows\n gray = cv2.resize(gray, (cols, rows))\n\n colsPadding = (int(math.ceil((28-cols)/2.0)),int(math.floor((28-cols)/2.0)))\n rowsPadding = (int(math.ceil((28-rows)/2.0)),int(math.floor((28-rows)/2.0)))\n gray = np.lib.pad(gray,(rowsPadding,colsPadding),'constant')\n _save_img_file('outputs/test_5.png',gray)\n\n shiftx, shifty = getBestShift(gray)\n shifted = shift(gray, shiftx, shifty)\n gray = shifted\n \n _save_img_file('outputs/test_final.png',gray)\n\n return gray", "def preProcessImage(im, norm=True, blur=None, equalize=False, quantize=None):\n\n #Convert to float to avoid any overflow or rounding issues\n im = np.array(im, dtype='float64')\n if blur and blur > 0:\n im = filters.gaussian_filter(im, blur)\n\n if norm:\n im = filters.normalize(im, 0.0, None)\n else:\n im = im/255. #convert to floats between 0 and 1 without normalizing\n\n if equalize: \n im = filters.image_histogram_equalization(im)\n\n if quantize:\n im = np.rint(im * (quantize-1))/(quantize-1)\n \n return im" ]
[ "0.67525816", "0.6369292", "0.6323539", "0.62565947", "0.6214317", "0.5944521", "0.59443814", "0.587811", "0.5848547", "0.5848547", "0.5848547", "0.5751432", "0.5619287", "0.55748063", "0.555569", "0.5498724", "0.54805", "0.54788357", "0.5396808", "0.5392538", "0.53866196", "0.53689426", "0.5367867", "0.53633714", "0.53610855", "0.53379726", "0.5328915", "0.53267205", "0.5315837", "0.5295802" ]
0.67281544
1
Create label tensor for a minibatch training from a list of sequences for the provided target indices. If sequences are unequal then they are padded with a NaN value.
def create_labels_tensor_for_minibatch(sequences: List[ClassificationItemSequence[ScalarItem]],
                                       target_indices: List[int]) -> torch.Tensor:
    return sequences_to_padded_tensor(
        sequences=[seq.get_labels_at_target_indices(target_indices) for seq in sequences],
        padding_value=np.nan
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_labels_at_target_indices(self, target_indices: List[int]) -> torch.Tensor:\n target_indices = sorted(target_indices)\n\n def _get_label_or_nan(idx: int) -> torch.Tensor:\n return self.items[idx].label if idx < len(self.items) else torch.tensor([np.nan])\n\n if any(p < 0 for p in target_indices):\n raise ValueError(\"Argument target_indices cannot contain negative values\")\n\n return torch.stack(list(map(_get_label_or_nan, target_indices)))", "def target_tensor(len, labels, scores):\n target = [0]*len\n for id, l in enumerate(labels):\n target[l] = scores[id]\n\n return target", "def prepare_labels(state_mapping, sequences):\n encoded_labels = [[state_mapping[state] for state in label] for label in sequences]\n \n depth = len(state_mapping)\n one_hot_labels = [[one_hot_encode(label, depth) for label in sequence] for sequence in encoded_labels]\n one_hot_labels = [np.asarray(ls) for ls in one_hot_labels]\n return one_hot_labels", "def pad_labellings(labels):\n target_length = max([len(labels) for labels in labels])\n padded = []\n\n for label in labels:\n padding_size = target_length - len(label)\n\n padded_label = label + [0] * padding_size\n\n assert len(padded_label) > 0\n\n padded.append(padded_label)\n\n return padded", "def make_multilabel_target(num_classes, classes):\n target = np.zeros(num_classes, dtype=np.uint8)\n target[classes] = 1\n return target", "def repair_labels(labels):\n ret = np.copy(labels)\n ret[:, 0] = 10 # overwrite length to be stop seq\n ret = np.roll(ret, -1, axis=1) # move first to last\n return ret", "def prepare_train_input(insts, bos_idx, eos_idx, src_pad_idx, trg_pad_idx,\n n_head):\n src_word, src_pos, src_slf_attn_bias, src_max_len = pad_batch_data(\n [inst[0] + [eos_idx] for inst in insts],\n src_pad_idx,\n n_head,\n is_target=False)\n src_word = src_word.reshape(-1, src_max_len)\n src_pos = src_pos.reshape(-1, src_max_len)\n trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = pad_batch_data(\n [[bos_idx] + inst[1] for inst in insts],\n trg_pad_idx,\n n_head,\n is_target=True)\n trg_word = trg_word.reshape(-1, trg_max_len)\n trg_pos = trg_pos.reshape(-1, trg_max_len)\n\n trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :],\n [1, 1, trg_max_len, 1]).astype(\"float32\")\n\n lbl_word, lbl_weight, num_token = pad_batch_data(\n [inst[1] + [eos_idx] for inst in insts],\n trg_pad_idx,\n n_head,\n is_target=False,\n is_label=True,\n return_attn_bias=False,\n return_max_len=False,\n return_num_token=True)\n lbl_word = lbl_word.reshape(-1, 1)\n lbl_weight = lbl_weight.reshape(-1, 1)\n\n data_inputs = [\n src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos,\n trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight\n ]\n\n return data_inputs", "def gen_labels(self, nidxs=None, condense_labels=False):\n\n if nidxs is None:\n nidxs = self.nidx_train\n\n y = []\n\n for r in nidxs:\n y.append(self.node_labels[r])\n\n if condense_labels:\n # This should be improved, since this will fail if there are labels with exactly the same number of samples\n # Current solution use a bit of noise to minimize conflicts/favors\n y = self.encode_labels(y)\n lab_weights = 1. 
- np.mean(y, axis=0)\n noise = np.random.normal(loc=0, scale=0.0001, size=np.shape(y))\n y_condensed = np.argmax(minmax_scale(y * lab_weights + noise, axis=1), axis=1)\n return y_condensed\n\n return self.encode_labels(y)", "def indices_one_hot(labels_indices, num_classes=10):\n \n num_labels = labels_indices.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_indices.ravel()] = 1\n \n return labels_one_hot", "def indices_one_hot(labels_indices, num_classes=10):\n\n num_labels = labels_indices.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_indices.ravel()] = 1\n\n return labels_one_hot", "def generate_labels(n_samples):\n return np.ones([n_samples, 1]), np.zeros([n_samples, 1])", "def padding_tensor(sequences, max_length=1000000):\n # get the number of sequences\n num = len(sequences)\n # get the maximum length (clip too long sequences)\n max_len = min(max([s.shape[0] for s in sequences]), max_length)\n # define new output dimensions\n out_dims = (num, max_len, *sequences[0].shape[1:])\n # create output_tensor with new dimensionality\n out_tensor = sequences[0].data.new(*out_dims).fill_(0)\n # create new mask_tensor with the corresponding mask\n mask = sequences[0].data.new(*out_dims).fill_(0)\n # iterate over the sequences\n logger.info('Start padding breaths....')\n with tqdm(\n total=len(sequences),\n bar_format=\"{desc:<5.5}{percentage:3.0f}%|{bar:100}{r_bar}\",\n ascii=True\n ) as pbar:\n for i, tensor in enumerate(sequences):\n # get the length of the current breath\n length = min(tensor.size(0), max_len)\n # add all valid breaths\n print(tensor)\n input('before')\n out_tensor[i, :length] = tensor[:length, :]\n # for the breaths that are \"too short\" padd with last value\n out_tensor[i, length:] = 0\n print(out_tensor)\n input('after')\n # create mask\n mask[i, :length] = 1\n # update progressbar\n pbar.update(1)\n\n # return result\n return max_len, out_tensor, mask", "def pad_batch_data(insts,\n pad_idx,\n n_head,\n is_target=False,\n is_label=False,\n return_attn_bias=True,\n return_max_len=True,\n return_num_token=False):\n return_list = []\n max_len = max(len(inst) for inst in insts)\n # Any token included in dict can be used to pad, since the paddings' loss\n # will be masked out by weights and make no effect on parameter gradients.\n inst_data = np.array(\n [inst + [pad_idx] * (max_len - len(inst)) for inst in insts])\n return_list += [inst_data.astype(\"int64\").reshape([-1, 1])]\n if is_label: # label weight\n inst_weight = np.array([[1.] * len(inst) + [0.] 
* (max_len - len(inst))\n for inst in insts])\n return_list += [inst_weight.astype(\"float32\").reshape([-1, 1])]\n else: # position data\n inst_pos = np.array([\n list(range(0, len(inst))) + [0] * (max_len - len(inst))\n for inst in insts\n ])\n return_list += [inst_pos.astype(\"int64\").reshape([-1, 1])]\n if return_attn_bias:\n if is_target:\n # This is used to avoid attention on paddings and subsequent\n # words.\n slf_attn_bias_data = np.ones(\n (inst_data.shape[0], max_len, max_len))\n slf_attn_bias_data = np.triu(slf_attn_bias_data,\n 1).reshape([-1, 1, max_len, max_len])\n slf_attn_bias_data = np.tile(slf_attn_bias_data,\n [1, n_head, 1, 1]) * [-1e9]\n else:\n # This is used to avoid attention on paddings.\n slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] *\n (max_len - len(inst))\n for inst in insts])\n slf_attn_bias_data = np.tile(\n slf_attn_bias_data.reshape([-1, 1, 1, max_len]),\n [1, n_head, max_len, 1])\n return_list += [slf_attn_bias_data.astype(\"float32\")]\n if return_max_len:\n return_list += [max_len]\n if return_num_token:\n num_token = 0\n for inst in insts:\n num_token += len(inst)\n return_list += [num_token]\n return return_list if len(return_list) > 1 else return_list[0]", "def prepare_labels(labels, class_mask):\n mask = [1 if elt else -1 for elt in class_mask]\n mask = np.array(mask)\n return labels.dot(mask)", "def _extract_labels(self, samples: List):\n targets = [\n self.sp_model.encode(sample[2].lower().replace(\"<unk>\", \"<garbage>\").replace(\"\\n\", \"\"))\n for sample in samples\n ]\n targets = [\n [ele if ele != 4 else self.sp_model.unk_id() for ele in target] for target in targets\n ] # map id of <unk> token to unk_id\n lengths = torch.tensor([len(elem) for elem in targets]).to(dtype=torch.int32)\n targets = torch.nn.utils.rnn.pad_sequence(\n [torch.tensor(elem) for elem in targets],\n batch_first=True,\n padding_value=1.0,\n ).to(dtype=torch.int32)\n return targets, lengths", "def align_targets(predictions, targets):\n if (getattr(predictions, 'broadcastable', None) == (False, True) and\n getattr(targets, 'ndim', None) == 1):\n targets = as_theano_expression(targets).dimshuffle(0, 'x')\n return predictions, targets", "def _relabel(labels, minval=0, bgval=None):\n\n labels = np.unique(labels, return_inverse=True)[-1] + minval\n if bgval is not None:\n labels[labels == minval] = bgval\n return labels", "def make_padded_output_tensor(exs, output_indexer, max_len):\n return np.array([[ex.y_indexed[i] if i < len(ex.y_indexed) else output_indexer.index_of(PAD_SYMBOL) for i in range(0, max_len)] for ex in exs])", "def make_padded_output_tensor(exs, output_indexer, max_len):\n return np.array([[ex.y_indexed[i] if i < len(ex.y_indexed) else output_indexer.index_of(PAD_SYMBOL) for i in range(0, max_len)] for ex in exs])", "def label2onehot(self, batch_size, labels):\r\n dim = 6\r\n out = torch.zeros(batch_size, dim)\r\n out[np.arange(batch_size), labels] = 1\r\n return out", "def prepare_data(seqs, addIdxNum=0, maxlen=None, win_size=1):\n # x: a list of sentences\n lengths = [len(s) for s in seqs]\n\n '''if maxlen is not None:\n new_seqs = []\n new_labels = []\n new_lengths = []\n for l, s, y in zip(lengths, seqs, labels):\n if l < maxlen:\n new_seqs.append(s)\n new_labels.append(y)\n new_lengths.append(l)\n lengths = new_lengths\n labels = new_labels\n seqs = new_seqs\n\n if len(lengths) < 1:\n return None, None, None'''\n\n n_samples = len(seqs)\n maxlen = numpy.max(lengths)\n\n '''\n n_samples : numbers of sentences\n '''\n\n x = 
numpy.zeros((maxlen, n_samples)).astype('int32')\n x_mask = numpy.zeros(((maxlen - addIdxNum) / win_size, n_samples)).astype(theano.config.floatX)\n\n for idx, s in enumerate(seqs):\n x[:lengths[idx], idx] = s\n x_mask[:((lengths[idx] - addIdxNum) / win_size), idx] = 1.\n\n #labels = numpy.asarray(labels).astype('int32')\n\n return x, x_mask, maxlen - addIdxNum", "def padded_sequences(input_sequences, total_words):\r\n max_len = max([len(x) for x in input_sequences])\r\n input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_len, padding='pre'))\r\n print(input_sequences)\r\n\r\n predictors, label = input_sequences[:, :-1], input_sequences[:, -1] # creates two variables: sequence / next word of Ngram\r\n label = ku.to_categorical(label, num_classes=total_words)\r\n return predictors, label, max_len", "def to_multi_label_matrix(target_labels: List[List[str]], label_names: List[str]) -> np.ndarray:\n def map_multi_label_line(line_labels: List[str]) -> List[int]:\n return [1 if label in line_labels else 0 for label in label_names]\n\n return np.array(list(map(map_multi_label_line, target_labels)))", "def make_padded_output_tensor(exs, output_indexer, max_len):\r\n return np.array(\r\n [[ex.y_indexed[i] if i < len(ex.y_indexed) else output_indexer.index_of(PAD_SYMBOL) for i in range(0, max_len)]\r\n for ex in exs])", "def get_batch(\n self, batch_indices, labels_important: bool\n ): # batch_indices is a list, e.g. one of labelled_set\n\n sequences, tags = [self.data[i] for i in batch_indices], [self.labels[i] for i in batch_indices]\n\n padded_sentences, lengths = pad_packed_sequence(\n pack_sequence(\n [torch.LongTensor(_) for _ in sequences], enforce_sorted=False\n ),\n batch_first=True,\n padding_value=self.padding_token,\n )\n padded_tags, _ = pad_packed_sequence(\n pack_sequence([torch.LongTensor(_) for _ in tags], enforce_sorted=False),\n batch_first=True,\n padding_value=self.empty_tag,\n )\n\n semi_supervision_mask = torch.ones(padded_tags.shape)\n\n if labels_important:\n # Fill in the words that have not been queried\n for j, sentence_tags in enumerate(padded_tags):\n sentence_index = batch_indices[j]\n for token_idx in range(int(lengths[j])):\n if token_idx in self.index.labelled_idx[sentence_index]:\n pass\n elif token_idx in self.index.temp_labelled_idx[sentence_index]:\n padded_tags[j, token_idx] = torch.tensor(self.temp_labels[sentence_index][token_idx])\n elif token_idx in self.index.unlabelled_idx[sentence_index]:\n padded_tags[j, token_idx] = torch.exp(torch.tensor(self.last_preds[sentence_index][token_idx]))\n semi_supervision_mask[\n j, token_idx\n ] = self.semi_supervision_multiplier\n else: # Padding\n continue\n\n return padded_sentences, padded_tags, lengths, semi_supervision_mask\n\n return padded_sentences, torch.tensor([]), lengths, semi_supervision_mask", "def sparse_tuple_from_label(self, sequences, dtype=np.int32):\n indices = []\n values = []\n \n for n, seq in enumerate(sequences):\n indices.extend(zip([n] * len(seq), range(len(seq))))\n values.extend(seq)\n \n indices = np.asarray(indices, dtype=np.int64)\n values = np.asarray(values, dtype=dtype)\n shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1] + 1], dtype=np.int64)\n \n return indices, values, shape", "def convert_to_one_hot(sequences, sequence_length, chars_length, char_to_index, labels):\n x = np.zeros((len(sequences), sequence_length, chars_length), dtype=np.bool)\n y = np.zeros((len(sequences), chars_length), dtype=np.bool)\n for i, sentence in enumerate(sequences):\n for 
t, char in enumerate(sentence):\n x[i, t, char_to_index[char]] = 1\n y[i, char_to_index[labels[i]]] = 1\n\n return x, y", "def get_weak_target(labels, lb_to_idx):\n classes_num = len(lb_to_idx)\n target = np.zeros(classes_num, dtype=np.bool)\n \n for label in labels: \n target[lb_to_idx[label]] = True\n \n return target", "def create_padding_mask(seq):\r\n seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\r\n return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)\r", "def __init__(self, tbptt_seqs, list_of_other_seqs, batch_size,\n truncation_length,\n tbptt_one_hot_size=None, other_one_hot_size=None,\n random_state=None):\n self.tbptt_seqs = tbptt_seqs\n self.list_of_other_seqs = list_of_other_seqs\n self.batch_size = batch_size\n self.truncation_length = truncation_length\n\n self.random_state = random_state\n if random_state is None:\n raise ValueError(\"Must pass random state for random selection\")\n\n self.tbptt_one_hot_size = tbptt_one_hot_size\n\n self.other_one_hot_size = other_one_hot_size\n if other_one_hot_size is not None:\n assert len(other_one_hot_size) == len(list_of_other_seqs)\n\n tbptt_seqs_length = [n for n, i in enumerate(tbptt_seqs)][-1] + 1\n self.indices_lookup_ = {}\n s = 0\n for n, ts in enumerate(tbptt_seqs):\n if len(ts) >= truncation_length + 1:\n self.indices_lookup_[s] = n\n s += 1\n\n # this one has things removed\n self.tbptt_seqs_length_ = len(self.indices_lookup_)\n\n other_seqs_lengths = []\n for other_seqs in list_of_other_seqs:\n r = [n for n, i in enumerate(other_seqs)]\n l = r[-1] + 1\n other_seqs_lengths.append(l)\n self.other_seqs_lengths_ = other_seqs_lengths\n\n other_seqs_max_lengths = []\n for other_seqs in list_of_other_seqs:\n max_l = -1\n for os in other_seqs:\n max_l = len(os) if len(os) > max_l else max_l\n other_seqs_max_lengths.append(max_l)\n self.other_seqs_max_lengths_ = other_seqs_max_lengths\n\n # make sure all sequences have the minimum number of elements\n base = self.tbptt_seqs_length_\n for sl in self.other_seqs_lengths_:\n assert sl >= base\n\n # set up the matrices to slice one_hot indexes out of\n # todo: setup slice functions? or just keep handling in next_batch\n if tbptt_one_hot_size is None:\n self._tbptt_oh_slicer = None\n else:\n self._tbptt_oh_slicer = np.eye(tbptt_one_hot_size)\n\n if other_one_hot_size is None:\n self._other_oh_slicers = [None] * len(other_seq_lengths)\n else:\n self._other_oh_slicers = []\n for ooh in other_one_hot_size:\n if ooh is None:\n self._other_oh_slicers.append(None)\n else:\n self._other_oh_slicers.append(np.eye(ooh, dtype=np.float32))\n # set up the indices selected for the first batch\n self.indices_ = np.array([self.indices_lookup_[si]\n for si in self.random_state.choice(self.tbptt_seqs_length_,\n size=(batch_size,), replace=False)])\n # set up the batch offset indicators for tracking where we are\n self.batches_ = np.zeros((batch_size,), dtype=np.int32)" ]
[ "0.64945525", "0.61621475", "0.5966142", "0.5797458", "0.5754382", "0.57243425", "0.56933594", "0.56704473", "0.5669515", "0.5635334", "0.56328714", "0.5534819", "0.551395", "0.5425681", "0.54200053", "0.54051876", "0.54023015", "0.5368551", "0.5368551", "0.53455186", "0.5323843", "0.5310559", "0.53074336", "0.5304597", "0.5304121", "0.5297368", "0.5291803", "0.5285593", "0.5283412", "0.5270663" ]
0.8987712
0
Given a url string, find the file extension at the end.
def get_ext(url):
    path = urlparse(url).path
    ext = splitext(path)[1]
    return ext
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_file_extension(url_file):\n pattern = re.split(\"\\.\",url_file)\n return pattern[-1]", "def get_ext(url):\r\n root, ext = splitext(url)\r\n return ext", "def _get_file_name(url: str) -> str:\n url = url.strip('/')\n result = findall(r'/(\\w+\\.\\w+)[?|$]', url)\n if result:\n return result[-1]\n return url.split('/')[-1]", "def getExtension(filename):\n return filename[filename.rfind('.'):]", "def get_extension(srcurl):\r\n if 'youtu' in srcurl:\r\n return 'video/youtube'\r\n else:\r\n disassembled = urlparse(srcurl)\r\n file_ext = splitext(basename(disassembled.path))[1]\r\n return 'video/' + file_ext.replace('.', '')", "def get_file_extention(file_path):\n # http://domain.tld/foo.bar -> foo.bar\n filename = os.path.basename(file_path)# Make sure we don't include domain names\n # foo.bar -> bar\n # foo.bar?baz -> bar\n # foobar/baz -> None\n # foobar/baz?fizz -> None\n file_extention_regex = \"\"\"\\.([a-zA-Z0-9]+)[?]?\"\"\"\n file_extention_search = re.search(file_extention_regex, filename, re.IGNORECASE)\n if file_extention_search:\n file_extention = file_extention_search.group(1)\n return file_extention", "def guess_ext_from_url(self):\n # http://docs.python.org/2.7/library/urlparse.html#urlparse.urlsplit\n # 0:scheme,1:netloc,2:path,3:query\n url_path = urlparse.urlsplit(self.what_url)[2]\n # path index is 2,Hierarchical path,may be empty string\n if '' == url_path:\n self.save_file_ext = None\n else:\n # 0: root 1: .ext\n file_name_info = os.path.splitext(url_path)\n # '.exe', contain .\n self.save_file_ext = file_name_info[1]", "def _get_extension_from_string(path):\n file_name_parts = os.path.basename(path).split('.')\n if len(file_name_parts) == 1: # no periods in file name\n return ''\n if len(file_name_parts) > 2: # two or more periods in file name\n return '.'.join(file_name_parts[-2:])\n return file_name_parts[-1] # one period in file name", "def get_file_ext(filename):\n return filename.rsplit('.', 1)[1]", "def get_extension(filename: str) -> str:\n return filename.split(\".\")[-1]", "def get_filename_from_url(url: str) -> str:\n return os.path.basename(urllib.parse.urlparse(urllib.parse.unquote_plus(url)).path)", "def filename_from(url):\n filename = url.split('/')[-1]\n return filename", "def find_file_extention(file_name):\n \n index = file_name.rfind(\".\")\n ext = file_name[index:].lower()\n \n return ext", "def find_extension(file):\n\n index_ext = file.name.rfind('.')\n if index_ext != -1:\n return file.name[index_ext+1:]\n # else: we raise an exception because\n # we can't find any extension", "def getExtension(link):\n if re.match('.*(?P<e>\\..*)/?$', link) is not None:\n e = re.search('.*(?P<e>\\..*)/?$', link).group('e')\n return e.replace('/', '')\n return \"\"", "def _filename_from_url(url):\n file_name = url.split(\"/\")[-1]\n return file_name", "def url_file_name(url):\r\n return url[url.rfind('/') + 1:]", "def url_filename(url):\n return os.path.basename(urlparse.urlparse(url).path)", "def get_extension(filename: str) -> str:\n return Path(filename).suffix[1:]", "def get_filename(url: str) ->str:\n if 'drive.google.com' in url:\n return _extract_google_drive_file_id(url)\n url, filename = os.path.split(url)\n return filename or os.path.basename(url)", "def get_file_extension(filename):\n if not filename:\n return \"\"\n\n dotpos = filename.rfind(\".\")\n return filename[dotpos + 1:].lower() if dotpos != -1 else \"\"", "def filename_ext(filename):\n base = os.path.basename(filename)\n return os.path.splitext(base)[1][1:]", "def 
get_file_name(url: str):\n filename = os.path.basename(url)\n fname, extension = os.path.splitext(filename)\n if extension:\n if \"=\" in filename:\n return filename.split(\"=\")[-1]\n return filename\n header = requests.head(url).headers\n if \"Location\" in header:\n return os.path.basename(header[\"Location\"])\n return filename", "def file_ext(path):\n result = os.path.splitext(path)[1]\n return result", "def get_filename_extension(filename):\n m = FILENAME_EXTENSION_RE.search(filename)\n return m.group(1) if m else None", "def _getFileExtension( filepath ):\r\n file = os.path.splitext(filepath.lower())\r\n if len( file ):\r\n return file[1].replace( '.', '' )\r\n else:\r\n return filepath", "def _get_ext(self, path):\n return os.path.splitext(path)[1][1:]", "def get_ext(f_name):\n \n for i in range(len(f_name)-1,-1,-1):\n if f_name[i]=='.':\n return f_name[i:]\n return None", "def get_path(url):\n # Different resources like customers, orders as each of them have files\n # names like 1.xml, 2.xml for individual records.\n # If the last part of URL is an integer, return last two parts of URL\n first, second_last, last = url.rsplit('/', 2)\n\n if last.isdigit():\n return os.path.join(second_last, last + '.xml')\n else:\n # If the last part is not an integer, return last part itself\n return last + '.xml'", "def get_extension_from_filename(filename):\n return filename[-4:]" ]
[ "0.86134696", "0.84738183", "0.80189323", "0.7584992", "0.75357443", "0.7505225", "0.745068", "0.7387205", "0.73681", "0.7355658", "0.73300385", "0.7305834", "0.7290223", "0.7265099", "0.7252523", "0.72133154", "0.72030866", "0.7150729", "0.7100172", "0.70208377", "0.6979139", "0.6964325", "0.6954835", "0.6923859", "0.69218695", "0.69176245", "0.6914993", "0.69117504", "0.69081134", "0.69080997" ]
0.8476722
1
Get document context. Lugar staff will be entering and editing URLs for the following 3 document types; the URLs should be editable in their original form but also saved as archived links
def get_document_context(context): document_types = { 'transcript': 'transcript', 'opening_statement_chair': 'chair opening statement', 'opening_statement_rm': 'ranking member opening statement', } eventdocuments_qs = EventDocument.objects.filter( event_id=context['hearing'] ) for key, value in document_types.items(): try: doc = eventdocuments_qs.get(note=value) context[key] = EventDocumentLink.objects.exclude( text='archived' ).filter( document_id=doc )[0].url except (ObjectDoesNotExist): context[key] = None try: context[key + '_archived'] = EventDocumentLink.objects.get( document_id=doc, text='archived' ).url except (ObjectDoesNotExist, UnboundLocalError): pass return context
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def document_details(context, document):\n return {'document': document, 'request': context['request']}", "def get_doc_context(self, docname, body, metatags):\n\n # find out relations\n prev = next = None\n parents = []\n rellinks = self.globalcontext['rellinks'][:]\n related = self.relations.get(docname)\n # Populate titles with the list of longtitles from the env instead of titles\n # titles = self.env.titles\n titles = self.env.longtitles\n if related and related[2]:\n try:\n next = {\n 'link': self.get_relative_uri(docname, related[2]),\n 'title': self.render_partial(titles[related[2]])['title']\n }\n rellinks.append((related[2], next['title'], 'N', _('next')))\n except KeyError:\n next = None\n if related and related[1]:\n try:\n prev = {\n 'link': self.get_relative_uri(docname, related[1]),\n 'title': self.render_partial(titles[related[1]])['title']\n }\n rellinks.append((related[1], prev['title'], 'P', _('previous')))\n except KeyError:\n # the relation is (somehow) not in the TOC tree, handle\n # that gracefully\n prev = None\n while related and related[0]:\n try:\n parents.append(\n {'link': self.get_relative_uri(docname, related[0]),\n 'title': self.render_partial(titles[related[0]])['title']})\n except KeyError:\n pass\n related = self.relations.get(related[0])\n if parents:\n parents.pop() # remove link to the master file; we have a generic\n # \"back to index\" link already\n parents.reverse()\n\n # title rendered as HTML\n title = self.env.longtitles.get(docname)\n title = title and self.render_partial(title)['title'] or ''\n # the name for the copied source\n sourcename = self.config.html_copy_source and docname + '.txt' or ''\n\n # metadata for the document\n meta = self.env.metadata.get(docname)\n\n # local TOC and global TOC tree\n self_toc = self.env.get_toc_for(docname, self)\n toc = self.render_partial(self_toc)['fragment']\n\n return dict(\n parents = parents,\n prev = prev,\n next = next,\n title = title,\n meta = meta,\n body = body,\n metatags = metatags,\n rellinks = rellinks,\n sourcename = sourcename,\n toc = toc,\n # only display a TOC if there's more than one item to show\n display_toc = (self.env.toc_num_entries[docname] > 1),\n )", "def get_doc_context(self, docname, body, metatags):\n\n # TYPO3: remove 'documentation' from end of 'shorttitle'\n shorttitle = self.globalcontext.get('shorttitle', '')\n if shorttitle and shorttitle.endswith(' documentation'):\n shorttitle = shorttitle[0:-14].rstrip()\n self.globalcontext['shorttitle'] = shorttitle\n \n\n # find out relations\n # TYPO3: always have order 'previous', 'up', 'next'\n prev = up = next = None\n parents = []\n rellinks = self.globalcontext['rellinks'][:]\n related = self.relations.get(docname)\n titles = self.env.titles\n if related and related[1]:\n try:\n prev = {\n 'link': self.get_relative_uri(docname, related[1]),\n 'title': self.render_partial(titles[related[1]])['title']\n }\n rellinks.append((related[1], prev['title'], 'P', _('previous')))\n except KeyError:\n # the relation is (somehow) not in the TOC tree, handle\n # that gracefully\n prev = None\n if related and related[0]:\n try:\n up = {\n 'link': self.get_relative_uri(docname, related[0]),\n 'title': self.render_partial(titles[related[0]])['title']\n }\n rellinks.append((related[0], up['title'], 'U', _('up')))\n except KeyError:\n # the relation is (somehow) not in the TOC tree, handle\n # that gracefully\n prev = None\n if related and related[2]:\n try:\n next = {\n 'link': self.get_relative_uri(docname, related[2]),\n 'title': 
self.render_partial(titles[related[2]])['title']\n }\n rellinks.append((related[2], next['title'], 'N', _('next')))\n except KeyError:\n next = None\n while related and related[0]:\n try:\n parents.append(\n {'link': self.get_relative_uri(docname, related[0]),\n 'title': self.render_partial(titles[related[0]])['title']})\n except KeyError:\n pass\n related = self.relations.get(related[0])\n if parents:\n parents.pop() # remove link to the master file; we have a generic\n # \"back to index\" link already\n parents.reverse()\n\n # title rendered as HTML\n title = self.env.longtitles.get(docname)\n title = title and self.render_partial(title)['title'] or ''\n # the name for the copied source\n sourcename = self.config.html_copy_source and docname + '.txt' or ''\n\n # metadata for the document\n meta = self.env.metadata.get(docname)\n\n # local TOC and global TOC tree\n self_toc = self.env.get_toc_for(docname, self)\n toc = self.render_partial(self_toc)['fragment']\n\n return dict(\n parents = parents,\n prev = prev,\n next = next,\n title = title,\n meta = meta,\n body = body,\n metatags = metatags,\n rellinks = rellinks,\n sourcename = sourcename,\n toc = toc,\n # only display a TOC if there's more than one item to show\n display_toc = (self.env.toc_num_entries[docname] > 1),\n )", "def get_context(self, publish=False):\n context = self.project.DEFAULT_CONTEXT\n try:\n file = self.project.CONTEXT_SOURCE_FILE\n # CSV\n if re.search(r'(csv|CSV)$', file):\n context.update(self.get_context_from_csv())\n # Excel\n if re.search(r'(xlsx|XLSX|xls|XLS)$', file):\n context.update(self.get_context_from_xlsx())\n except AttributeError:\n context.update(self.get_context_from_gdoc())\n\n return context", "def get_document(self):\n return self.document", "def _context(self):\n domain = Site.objects.get_current()\n scheme = 'http' if settings.DEBUG else 'https'\n return {\n 'event': self.event,\n 'documents': self.documents.all(),\n 'mchp_base_url': '{}://{}'.format(scheme, domain)\n }", "def GetDocument(self, *args, **kwargs):\n pass", "def get_document(self, docid):\n raise NotImplementedError", "def get_context(self):\r\n _context = EditingDescriptor.get_context(self)\r\n # Add some specific HTML rendering context when editing HTML modules where we pass\r\n # the root /c4x/ url for assets. 
This allows client-side substitutions to occur.\r\n _context.update({\r\n 'base_asset_url': StaticContent.get_base_url_path_for_course_assets(self.location.course_key),\r\n 'enable_latex_compiler': self.use_latex_compiler,\r\n 'editor': self.editor\r\n })\r\n return _context", "def docs(request):\n # if request.user.is_authenticated():\n # return redirect('/fastapp')\n return context()", "def edit_document():", "def documentdetail(request, docid):\n\tdocument = get_object_or_404(Document, pk=docid)\n\tif not request.user.has_perm('documents.view_document', obj=document):\n\t\treturn HttpResponse(loader.render_to_string('401.html',\n\t\t\tRequestContext(request, {'error_message':\n\t\t\t\t_(\"You are not allowed to view this document.\")})), status=401)\n\ttry:\n\t\trelated = document.content_type.get_object_for_this_type(id=document.object_id)\n\texcept:\n\t\trelated = ''\n\n\treturn render_to_response(\"documents/docinfo.html\", RequestContext(request, {\n\t\t'permissions_json': json.dumps(_perms_info(document, DOCUMENT_LEV_NAMES)),\n\t\t'document': document,\n\t\t'imgtypes': imgtypes,\n\t\t'related': related\n\t}))", "def getDocumentId(self): #$NON-NLS-1$\r", "def get_context(event):\n for link in event.links.links:\n if link.get(\"type\") == \"CONTEXT\":\n context = link.get(\"target\")\n break\n else:\n context = None\n return context", "def _getForDocument (self):\n return self.__forDocument", "def get_html_document(self, context, request: TracimRequest, hapic_data=None) -> ContentInContext: # nopep8\n app_config = request.registry.settings['CFG']\n api = ContentApi(\n current_user=request.current_user,\n session=request.dbsession,\n config=app_config,\n )\n content = api.get_one(\n hapic_data.path.content_id,\n content_type=ContentType.Any\n )\n return api.get_content_in_context(content)", "def context(self) -> CONTEXT:", "def _get_edit_document_options(self, document, agenda_item, meeting):\n checkout_manager = getMultiAdapter((document, self.request),\n ICheckinCheckoutManager)\n\n button = {}\n button['visible'] = bool(checkout_manager.check_permission(\n 'Modify portal content'))\n button['active'] = button['visible'] and (\n checkout_manager.is_checkout_allowed() or\n checkout_manager.is_checked_out_by_current_user())\n button['url'] = meeting.get_url(\n view='agenda_items/{}/edit_document'.format(\n agenda_item.agenda_item_id))\n return {\n 'document_checked_out': bool(\n checkout_manager.get_checked_out_by()),\n 'edit_document_button': button}", "def document_edit(document_id):\n\n log(session['login'], 'updated', 'document {}'.format(document_id))\n\n doc = Document.query.filter(Document.id == document_id).first_or_404()\n doc.title = request.form['title']\n doc.price = request.form['price']\n doc.keywords = comma_to_list(request.form['keywords'])\n doc.authors = comma_to_list(request.form['authors'])\n try:\n copy_delta = int(request.form.get('copy_delta', 0))\n except:\n copy_delta = 0\n if copy_delta > 0:\n for _ in range(copy_delta):\n dc = DocumentCopy(document=doc)\n elif copy_delta < 0:\n if -copy_delta <= len(doc.available_copies):\n # noinspection PyComparisonWithNone\n dcs = DocumentCopy.query.filter(DocumentCopy.document == doc, DocumentCopy.loan == None).limit(\n -copy_delta).all()\n for dc in dcs:\n db.session.delete(dc)\n db.session.commit()\n if doc.type == 'book':\n doc.edition = request.form['edition']\n doc.publisher = request.form['publisher']\n doc.publishment_year = request.form['publishment_year']\n doc.bestseller = 'bestseller' in request.form\n 
doc.reference = 'reference' in request.form\n\n db.session.add(doc)\n db.session.commit()\n\n from hexagonal.ui.user import update_qr_dates\n update_qr_dates()\n\n return redirect(request.referrer)", "def document_new():\n\n t = request.form['type']\n if t == 'book':\n doc = Book(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n edition=request.form['edition'],\n publisher=request.form['publisher'],\n publishment_year=request.form['publishment_year'],\n bestseller='bestseller' in request.form,\n reference='reference' in request.form\n )\n elif t == 'av':\n doc = AVMaterial(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors'])\n )\n elif t == 'article':\n doc = JournalArticle(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n issue_editor=request.form['issue_editor'],\n issue_publication_date=request.form['issue_publication_date'],\n journal=request.form['journal']\n )\n\n for i in range(int(request.form['copies'])):\n dc = DocumentCopy(document=doc)\n\n db.session.add(doc)\n db.session.commit()\n\n log(session['login'], 'created', 'document {}'.format(doc.id))\n\n # TODO\n return redirect('/admin/documents')", "def test_get_application_document_with_context(self, test_client, db_session, auth_headers):\n now_application = NOWApplicationFactory()\n now_application_identity = NOWApplicationIdentityFactory(now_application=now_application)\n\n get_resp = test_client.get(\n f'/now-applications/application-document-types/RJL?context_guid={now_application_identity.now_application_guid}',\n headers=auth_headers['full_auth_header'])\n assert get_resp.status_code == 200, get_resp.response\n get_data = json.loads(get_resp.data.decode())\n assert get_data['document_template']\n assert get_data['document_template'].get('form_spec'), get_data\n mine_no_item = [\n x for x in get_data['document_template']['form_spec'] if x['id'] == \"mine_no\"\n ][0]\n assert mine_no_item\n assert mine_no_item['id'] == 'mine_no'\n assert mine_no_item['context-value'] == str(now_application_identity.mine.mine_no)", "def getMenuItems(self, context, request):\n\n results = []\n url = context.absolute_url()\n can_tag = not sdct.IStructuredDocument.providedBy(context)\n can_untag = not can_tag\n \n if can_tag:\n results.append(\n { 'title' : \"Mark as a structured document\",\n 'description' : 'Mark the content as a structured document',\n 'action' : \"%s/@@sd.tagging\" % url,\n 'selected' : False,\n 'icon' : u\"\",\n 'extra' : {'id': 'sd_tag',\n 'separator': None,\n 'class': ''\n },\n 'submenu' : None,\n }\n )\n else:\n layout = context.getLayout()\n results.append(\n { 'title' : \"Remove structured document options\",\n 'description' : 'Restore the content normal behavior',\n 'action' : \"%s/@@sd.untagging\" % url,\n 'selected' : False,\n 'icon' : u\"\",\n 'extra' : {'id': 'sd_untag',\n 'separator': None,\n 'class': ''\n },\n 'submenu' : None,\n }\n )\n \n results.append(\n { 'title' : \"Document on one page\",\n 'description' : 'Change the display of the document',\n 'action' : (\"%s/@@sd.options?layout=@@sd.document.onepage\"\n % url),\n 'selected' : layout == '@@sd.document.onepage',\n 'icon' : u\"\",\n 'extra' : {'id': 'sd_document_onepage',\n 'separator': 'actionSeparator',\n 
'class': ''\n },\n 'submenu': None,\n }\n )\n\n return results", "def doc(request, doc_id):\n doc_obj = get_object_or_404(Document, pk=doc_id)\n author_person_objs = doc_obj.author_person.all()\n author_organization_objs = doc_obj.author_organization.all()\n recipient_person_objs = doc_obj.recipient_person.all()\n recipient_organization_objs = doc_obj.recipient_organization.all()\n cced_person_objs = doc_obj.cced_person.all()\n cced_organization_objs = doc_obj.cced_organization.all()\n page_objs = doc_obj.page_set.all()\n obj_dict = {\n 'doc_obj': doc_obj,\n 'author_person_objs': author_person_objs,\n 'author_organization_objs': author_organization_objs,\n 'recipient_person_objs': recipient_person_objs,\n 'recipient_orgaization_objs': recipient_organization_objs,\n 'cced_person_objs': cced_person_objs,\n 'cced_organization_objs': cced_organization_objs,\n 'page_objs': page_objs\n }\n return render(request, 'doc.jinja2', obj_dict)", "def get_reference_for(document):\n k = Factory.build('shared.doc_reference')\n for key in ('name', 'canonical_name'):\n conditional_copy(document, k, key)\n if not getattr(k, 'canonical_name'):\n if getattr(k,'name'):\n setattr(k,'canonical_name',getattr(k,'name'))\n for key in ('version',):\n conditional_copy(document._meta, k, key)\n for inkey, outkey in [('uid','id'),]:\n conditional_copy(document._meta, k, inkey, outkey)\n for inkey, outkey in [('type_key','type'),]:\n setattr(k, outkey, getattr(document._osl,inkey))\n return k", "def getdoc():\n\n\timport webnotes\n\tfrom webnotes.utils import cint\n\t\n\tform = webnotes.form_dict\n\tdoctype, docname = form.get('doctype'), form.get('name')\n\tprefix = cint(form.get('from_archive')) and 'arc' or 'tab'\n\n\tif not (doctype and docname):\n\t\traise Exception, 'doctype and name required!'\n\t\n\tdoclist = []\n\t# single\n\tdoclist = load_single_doc(doctype, docname, (form.get('user') or webnotes.session['user']), prefix)\n\t\n\t# load doctype along with the doc\n\tif form.get('getdoctype'):\n\t\timport webnotes.model.doctype\n\t\tdoclist += webnotes.model.doctype.get(doctype)\n\n\t# tag as archived\n\tif prefix == 'arc':\n\t\tdoclist[0].__archived=1\n\n\twebnotes.response['docs'] = doclist", "def document(self) -> str:\n return pulumi.get(self, \"document\")", "def document_view(index_name, doc_type, doc_id):\n resp = es.get(index=index_name, doc_type=doc_type, id=doc_id)\n document = resp[\"_source\"]\n print(document)", "def get_context(self):\n uuid = self.data.get('uuid', None)\n if uuid is None:\n return\n item = ploneapi.content.get(UID=uuid)\n return item", "def __call__(self, doc):\n return doc", "def get_context_from_gdoc(self):\n try:\n start = int(time.time())\n if not self.data or start > self.expires:\n self.data = self._get_context_from_gdoc(self.project.SPREADSHEET_KEY)\n end = int(time.time())\n ttl = getattr(self.project, 'SPREADSHEET_CACHE_TTL',\n SPREADSHEET_CACHE_TTL)\n self.expires = end + ttl\n return self.data\n except AttributeError:\n return {}" ]
[ "0.7030186", "0.66666174", "0.6468771", "0.62081987", "0.60369486", "0.58938795", "0.5826926", "0.5760497", "0.57392025", "0.57280135", "0.5723985", "0.5702111", "0.56988734", "0.5690935", "0.56782657", "0.5647661", "0.558646", "0.5575863", "0.55497485", "0.5548884", "0.5516756", "0.551302", "0.54984164", "0.5485797", "0.54833555", "0.5458738", "0.5458469", "0.54205227", "0.54203993", "0.5368495" ]
0.7395891
0
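
A minimal usage sketch for the `get_document_context` row above. The view wiring and the `hearing_id` variable are assumptions; the only requirement visible in the row is that `context['hearing']` hold the Event primary key used in the `event_id` filter.

# Illustrative only — names outside get_document_context are placeholders.
context = {"hearing": hearing_id}          # pk of the Event (hearing) being rendered
context = get_document_context(context)

context["transcript"]                      # editable, non-archived URL (or None)
context.get("transcript_archived")         # archived URL; key is set only when one exists
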
Find and create committees as EventParticipants.
def save_committees(event, committees): for committee in committees: name = committee.name organization = Organization.objects.get(id=committee.id) entity_type = "organization" new_committee = EventParticipant( name=name, event=event, organization=organization, entity_type=entity_type ) new_committee.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_committees(collection, committees):\n collection.remove()\n for c in committees:\n if not collection.find_one({\"committee_id\": c[\"committee_id\"]}):\n collection.insert(c)", "def invite_org_or_player(self, event):\n for arg in self.lhslist:\n org, pc = self.get_org_or_dompc(arg)\n if event:\n if org:\n if org in event.orgs.all():\n raise self.CalCmdError(\"That organization is already invited.\")\n event.invite_org(org)\n else:\n if pc in event.dompcs.all():\n raise self.CalCmdError(\"They are already invited.\")\n event.add_guest(pc)\n else:\n proj = self.project\n if not proj:\n raise self.CalCmdError(\n \"You must use /create first or specify an event.\"\n )\n if org:\n if org.id in proj[\"org_invites\"]:\n raise self.CalCmdError(\"That organization is already invited.\")\n proj[\"org_invites\"].append(org.id)\n else:\n if pc.id in proj[\"hosts\"] or pc.id in proj[\"gms\"]:\n raise self.CalCmdError(\n \"They are already invited to host or gm.\"\n )\n if pc.id in proj[\"invites\"]:\n raise self.CalCmdError(\"They are already invited.\")\n proj[\"invites\"].append(pc.id)\n self.msg(\"{wInvited {c%s{w to attend.\" % (pc or org))", "def main(options):\n\n BASE_PATH = settings.CONGRESS_LEGISLATORS_PATH\n \n meeting_processor = CommitteeMeetingProcessor()\n\n log.info('Processing committees')\n COMMITTEES_FILE = BASE_PATH + 'committees-current.yaml'\n\n if not File.objects.is_changed(COMMITTEES_FILE) and not options.force:\n log.info('File %s was not changed' % COMMITTEES_FILE)\n else:\n tree = yaml_load(COMMITTEES_FILE)\n total = len(tree)\n progress = Progress(total=total)\n seen_committees = set()\n for committee in tree:\n try:\n cobj = Committee.objects.get(code=committee[\"thomas_id\"])\n except Committee.DoesNotExist:\n print \"New committee:\", committee[\"thomas_id\"]\n cobj = Committee(code=committee[\"thomas_id\"])\n \n cobj.committee_type = TYPE_MAPPING[committee[\"type\"]]\n cobj.name = committee[\"name\"]\n cobj.url = committee.get(\"url\", None)\n cobj.obsolete = False\n cobj.committee = None\n cobj.save()\n seen_committees.add(cobj.id)\n\n for subcom in committee.get('subcommittees', []):\n code = committee[\"thomas_id\"] + subcom[\"thomas_id\"]\n try:\n sobj = Committee.objects.get(code=code)\n except Committee.DoesNotExist:\n print \"New subcommittee:\", code\n sobj = Committee(code=code)\n \n sobj.name = subcom[\"name\"]\n sobj.url = subcom.get(\"url\", None)\n sobj.type = None\n sobj.committee = cobj\n sobj.obsolete = False\n sobj.save()\n seen_committees.add(sobj.id)\n \n progress.tick()\n \n # Check for non-obsolete committees in the database that aren't in our\n # file.\n other_committees = Committee.objects.filter(obsolete=False).exclude(id__in=seen_committees)\n if len(other_committees) > 0:\n print \"Marking obsolete:\", \", \".join(c.code for c in other_committees)\n other_committees.update(obsolete=True)\n\n File.objects.save_file(COMMITTEES_FILE)\n \n log.info('Processing committee members')\n MEMBERS_FILE = BASE_PATH + 'committee-membership-current.yaml'\n file_changed = File.objects.is_changed(MEMBERS_FILE)\n\n if not file_changed and not options.force:\n log.info('File %s was not changed' % MEMBERS_FILE)\n else:\n # map THOMAS IDs to GovTrack IDs\n y = yaml_load(BASE_PATH + \"legislators-current.yaml\")\n person_id_map = { }\n for m in y:\n if \"id\" in m and \"govtrack\" in m[\"id\"] and \"thomas\" in m[\"id\"]:\n person_id_map[m[\"id\"][\"thomas\"]] = m[\"id\"][\"govtrack\"]\n \n # load committee members\n tree = yaml_load(MEMBERS_FILE)\n total 
= len(tree)\n progress = Progress(total=total, name='committees')\n \n # We can delete CommitteeMember objects because we don't have\n # any foreign keys to them.\n CommitteeMember.objects.all().delete()\n\n # Process committee nodes\n for committee, members in tree.items():\n try:\n cobj = Committee.objects.get(code=committee)\n except Committee.DoesNotExist:\n print \"Committee not found:\", committee\n continue\n\n # Process members of current committee node\n for member in members:\n mobj = CommitteeMember()\n mobj.person = Person.objects.get(id=person_id_map[member[\"thomas\"]])\n mobj.committee = cobj\n if \"title\" in member:\n mobj.role = ROLE_MAPPING[member[\"title\"]]\n mobj.save()\n \n progress.tick()\n\n File.objects.save_file(MEMBERS_FILE)\n \n return\n\n log.info('Processing committee schedule')\n SCHEDULE_FILE = 'data/us/112/committeeschedule.xml'\n file_changed = File.objects.is_changed(SCHEDULE_FILE)\n\n if not file_changed and not options.force:\n log.info('File %s was not changed' % SCHEDULE_FILE)\n else:\n tree = etree.parse(SCHEDULE_FILE)\n \n # We have to clear out all CommitteeMeeting objects when we refresh because\n # we have no unique identifier in the upstream data for a meeting. We might use\n # the meeting's committee & date as an identifier, but since meeting times can\n # change this might have awkward consequences for the end user if we even\n # attempted to track that.\n\n CommitteeMeeting.objects.all().delete()\n\n # Process committee event nodes\n for meeting in tree.xpath('/committee-schedule/meeting'):\n try:\n mobj = meeting_processor.process(CommitteeMeeting(), meeting)\n mobj.save()\n \n mobj.bills.clear()\n for bill in meeting.xpath('bill'):\n bill = Bill.objects.get(congress=bill.get(\"session\"), bill_type=BillType.by_xml_code(bill.get(\"type\")), number=int(bill.get(\"number\")))\n mobj.bills.add(bill)\n except Committee.DoesNotExist:\n log.error('Could not load Committee object for meeting %s' % meeting_processor.display_node(meeting))\n\n for committee in Committee.objects.all():\n if not options.disable_events:\n committee.create_events()\n \n File.objects.save_file(SCHEDULE_FILE)", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for party in added:\n Notify(context, party)", "def create_participants(self, nagg, nneu, custom):\n neu = [NeutralScooterCompany() for _ in range(nneu)]\n agg = [AggressiveScooterCompany() for _ in range(nagg)]\n parts = neu + agg\n if custom is not None:\n parts += [custom]\n self.participants = parts\n return", "def save_with_invites(self, ical, attendees, **attendeeoptions):\n ## TODO: consolidate together with save_*\n obj = self._calendar_comp_class_by_data(ical)(data=ical, client=self.client)\n obj.parent = self\n obj.add_organizer()\n for attendee in attendees:\n obj.add_attendee(attendee, **attendeeoptions)\n obj.id = obj.icalendar_instance.walk(\"vevent\")[0][\"uid\"]\n obj.save()\n return obj", "def save(self, commit=True):\r\n event = super(RPEventCreateForm, self).save(commit)\r\n event.add_host(self.owner, main_host=True)\r\n hosts = self.cleaned_data.get(\"hosts\", [])\r\n for host in hosts:\r\n # prevent owner from being downgraded to normal host if they were added\r\n if host != self.owner:\r\n event.add_host(host)\r\n gms = self.cleaned_data.get(\"gms\", [])\r\n for gm in gms:\r\n event.add_gm(gm)\r\n for guest in self.cleaned_data.get(\"invites\", []):\r\n if guest in hosts or guest in gms or guest == self.owner:\r\n continue\r\n 
event.add_guest(guest)\r\n for org in self.cleaned_data.get(\"org_invites\", []):\r\n event.invite_org(org)\r\n plot = self.cleaned_data.get(\"plot\", None)\r\n if plot:\r\n # we create a blank PlotUpdate so that this is tagged to the Plot, but nothing has happened yet\r\n event.beat = plot.updates.create()\r\n event.save()\r\n self.pay_costs()\r\n self.post_event(event)\r\n return event", "def add_external_commitments(ecs):\n update_commitments(external_to_add=ecs)", "def create_attendees(event, attendees_dict):\n attendees_list = []\n for record in attendees_dict:\n attendee = Attendee()\n attendee.event = event\n attendee.email = record.get('email', '')\n # Converting camelCase to snake_case\n attendee.response = ''.join(\n i if i.islower() else f'_{i.lower()}' for i\n in record['responseStatus']\n )\n if record.get('self') and record.get('responseStatus') == ACCEPTED:\n event.is_attendee = True\n else:\n attendees_list.append(attendee)\n Attendee.objects.bulk_create(attendees_list)\n event.save()", "def add_players_on_floor(self):\n for period in self.Periods:\n # set current players to be period starters\n current_players = period.Starters.copy()\n for pbp_event in period.Events:\n if pbp_event.is_substitution():\n coming_in = pbp_event.player2_id\n going_out = pbp_event.player_id\n team_id = pbp_event.team_id\n current_players[team_id] = [coming_in if player == going_out else player for player in current_players[team_id]]\n pbp_event.current_players = current_players.copy()", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for p in added:\n Notify(context)", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for p in added:\n Notify(context)", "def participants(self):\r\n return Participants(self)", "def committees():\n os_committees = Committee()\n os_committees.query()\n os_committees.parse()\n wiki_functions.write_to_csv_file_for_DataTransfer(os_committees,\n os_committees.table)", "def participants(self):\n return Participants(self)", "def creat_team(self):\n te = Teams()\n per = Persons()\n teamlist = []\n for one in per.find({'role':'leader'},{'team_name'}):\n if one['team_name'] not in teamlist:\n teamlist.append(one['team_name'])\n # print len(teamlist)\n for team in teamlist:\n tmp = {'name': '', 'leader_email': '', 'person_emails': []}\n tmp['name'] = team\n tmp['leader_email'] = per.get_one({'team_name':team,'role':'leader'})['email']\n for one in per.find({'team_name':team},{'email'}):\n tmp['person_emails'].append(one['email'])\n print tmp\n search_t = te.get_one({'name':team})\n if search_t is None:\n te.insert_one(tmp)\n else:\n te.update_one({'name':team,'leader_email':'','person_emails':''},tmp,cover=True)", "def post(self, event_id: int) -> Response:\n body = request.get_json(force=True)\n\n if not body.get('participants'):\n return jsonify({\n \"status\": 404,\n \"message\": \"Empty or unprovided participants list\"\n })\n\n for username in body.get('participants'):\n participant = UserModel.find_by_username(username)\n\n if participant is None:\n if UserModel.exists_remote(username):\n participant = UserModel(username=username)\n participant.save_to_db()\n else:\n return jsonify({\n \"status\": 404,\n \"message\": \"User <{}> not found\".format(username)\n })\n\n if participant in self.event.guests:\n return jsonify({\n \"status\": 400,\n \"message\": \"<{}> already registered for event as guest\".format(participant.username)\n })\n\n if participant in self.event.participants:\n 
return jsonify({\n \"status\": 400,\n \"message\": \"<{}> already registered for event as participant\".format(participant.username)\n })\n\n self.event.add_participant(participant)\n\n return jsonify({\n \"status\": 200,\n \"message\": \"All users were successfully registered for event participants\"\n })", "def participants(self):\n for participant in self.get_data(\"participants\"):\n yield Participant(participant, **self._new_session_args)\n\n return", "def add_poll_participants(self, poll_key, new_participants):\n poll_data = self.get_poll(poll_key)\n participants = poll_data['participants']\n part_emails = participants.values()\n now_str = datetime.datetime.utcnow().isoformat()\n\n new_part_map = {}\n for email in new_participants:\n if email not in part_emails: # No duplicate emails\n # Add new participant key to the Poll\n new_part_key = helpers.generateKeyString(email, 'part_')\n participants[new_part_key] = email\n self.client.set(poll_key, dumps(poll_data))\n # Create new participant record\n new_part_data_raw = {\n 'email': email,\n 'poll': poll_key,\n 'voted': False,\n 'choice': None\n }\n self.set_participant(new_part_key, new_part_data_raw)\n new_part_map[new_part_key] = email\n return new_part_map", "def test_add_many_objects_implicit_commit(self):\n\n # That one fails in r5 (<commit/> must be made on its own)\n\n doc_count = 10\n user_ids = [get_rand_string() for x in range(doc_count)]\n data = [get_rand_string() for x in range(doc_count)]\n ids = [get_rand_string() for x in range(doc_count)]\n documents = []\n for x in range(doc_count):\n doc = Document()\n doc['user_id'] = user_ids[x]\n doc['data'] = data[x]\n doc['id'] = ids[x]\n documents.append(doc)\n\n # Pass in the commit flag.\n self.conn.add(documents, True)\n\n results = []\n for id in ids:\n res = self.conn.query(\"id:\" + id).results\n if not res:\n self.fail(\"Could not find document (id:%s)\" % id)\n results.append(res[0])", "def poll_and_save():\n\tusers = User.query.all()\n\tfor user in users:\n\t\tlogging.debug(\"polling for {}\".format(user))\n\t\t# API call to WEconnect activities-with-events\n\t\tactivity_events = weconnect.get_todays_events(user)\n\t\tlogging.debug(activity_events)\t\n\t\n\tfor activity in activity_events:\n\t\tfor ev in activity[\"events\"]:\n\t\t\tevent = session.query(Event).filter_by(eid == ev[\"eid\"]).first()\n\t\t\tif event:\n\t\t\t\t#update the completion\n\t\t\t\tevent.completed = (ev[\"didCheckin\"] == True)\n\t\t\telse: #eid doesn't exist, add new event\n\t\t\t\tnewEvent = weconnect.createNewEvent(ev)\n\t\t\t\tsession.add(newEvent)\n\ttry:\t\t\n\t\tsession.commit()\n\t\tprint(\"Received {} Activity events in last poll.\").format(len(activity_events))\n\texcept:\n\t\tsession.rollback()\n\t\tprint(\"Session Commit failed\")", "def committers_changes(self) -> Iterator[CommitterChange]:\n for committer_change in self._yaml[\"committers\"]:\n # Start ignoring PyLintBear\n match action := CommitterActions(committer_change[\"action\"]):\n case CommitterActions.ADDITION:\n yield CommitterChange(\n name=committer_change[\"name\"],\n action=action,\n link=committer_change[\"link\"],\n email=committer_change[\"email\"],\n company=committer_change[\"company\"],\n committer_id=committer_change[\"id\"],\n timezone=committer_change[\"timezone\"],\n )\n case CommitterActions.DELETION:\n yield CommitterChange(\n name=committer_change[\"name\"],\n action=action,\n link=committer_change[\"link\"],\n )\n # Stop ignoring", "def test_add_many_implicit_commit(self):\n\n # That one fails in r5 
(<commit/> must be made on its own)\n\n doc_count = 10\n user_ids = [get_rand_string() for x in range(doc_count)]\n data = [get_rand_string() for x in range(doc_count)]\n ids = [get_rand_string() for x in range(doc_count)]\n documents = [dict(user_id=user_ids[x], data=data[x], id=ids[x])\n for x in range(doc_count)]\n\n # Pass in the commit flag.\n self.conn.add(documents, True)\n\n results = []\n for id in ids:\n res = self.conn.query(\"id:\" + id).results\n if not res:\n self.fail(\"Could not find document (id:%s)\" % id)\n results.append(res[0])", "def test_list_events(self):\n\n def clean_up(trainee, trainer):\n self.database.mongo.event.delete_many({\n 'creator_id': ObjectId(trainee._id)\n })\n\n self.database.mongo.event.delete_many({\n 'creator_id': ObjectId(trainer._id)\n })\n\n trainee = self.database.get_trainee_by_username('testtrainee')\n trainer = self.database.get_trainer_by_username('testtrainer')\n\n try:\n clean_up(trainee, trainer)\n event = Event(\n _id=None,\n creator_id=trainee._id,\n title='testEvent',\n date=datetime(2020, 12, 2),\n description='a simple desc',\n participant_id=trainer._id\n )\n self.database.create_event(event)\n database_event = self.database.mongo.event.find_one({\n 'title': event.title,\n 'creator_id': ObjectId(trainee._id)\n })\n assert database_event is not None\n assert str(database_event['creator_id']) == event.creator_id\n assert str(database_event['participant_id']\n ) == event.participant_id\n\n event = Event(\n _id=None,\n creator_id=trainer._id,\n title='testEvent',\n date=datetime(2020, 12, 2),\n description='a simple desc',\n participant_id=trainee._id\n )\n self.database.create_event(event)\n database_event = self.database.mongo.event.find_one({\n 'title': event.title,\n 'creator_id': ObjectId(trainer._id)\n })\n assert database_event is not None\n assert str(database_event['creator_id']) == event.creator_id\n assert str(database_event['participant_id']\n ) == event.participant_id\n\n finally:\n clean_up(trainee, trainer)", "def create_participant(name='Not Brian', email='[email protected]') ->\\\n Participant:\n participant = Participant(name=name, email=email)\n return participant", "def test_create_event(self):\n def clean_up(trainee, trainer):\n self.database.mongo.event.delete_many({\n 'title': 'testEvent',\n 'creator_id': ObjectId(trainee._id)\n })\n\n self.database.mongo.event.delete_many({\n 'title': 'testEvent',\n 'creator_id': ObjectId(trainer._id)\n })\n\n trainee = self.database.get_trainee_by_username('testtrainee')\n trainer = self.database.get_trainer_by_username('testtrainer')\n\n try:\n\n clean_up(trainee, trainer)\n\n event = Event(\n _id=None,\n creator_id=trainee._id,\n title='testEvent',\n date=datetime(2020, 12, 2),\n description='a simple desc',\n participant_id=trainer._id\n )\n\n self.database.create_event(event)\n database_event = self.database.mongo.event.find_one({\n 'title': event.title,\n 'creator_id': ObjectId(trainee._id)\n })\n\n assert database_event['title'] == event.title\n assert str(database_event['creator_id']) == str(event.creator_id)\n assert database_event['date'] == str(event.date)\n assert database_event['title'] == event.title\n assert database_event['description'] == event.description\n assert str(database_event['participant_id']\n ) == event.participant_id\n\n clean_up(trainee, trainer)\n\n event = Event(\n _id=None,\n creator_id=trainer._id,\n title='testEvent',\n date=datetime(2020, 12, 2),\n description='a simple desc',\n participant_id=trainer._id\n )\n\n 
self.database.create_event(event)\n database_event = self.database.mongo.event.find_one({\n 'title': event.title,\n 'creator_id': ObjectId(trainer._id)\n })\n\n assert database_event['title'] == event.title\n assert str(database_event['creator_id']) == str(event.creator_id)\n assert database_event['date'] == str(event.date)\n assert database_event['title'] == event.title\n assert database_event['description'] == event.description\n assert str(database_event['participant_id']\n ) == event.participant_id\n finally:\n clean_up(trainee, trainer)", "def commits(self):\n p = Popen(['git', 'rev-list', '--all', '--timestamp', '--parents'], \n cwd=self.path, stdout=PIPE)\n for line in p.stdout:\n commit_info = line.split()\n if len(commit_info) < 2:\n print >> sys.stderr, \"error: bad line: %r\" % line\n continue\n timestamp = int(commit_info.pop(0))\n commit_info = map(CommitId, commit_info)\n commit_id = commit_info.pop(0)\n yield (timestamp, commit_id, commit_info)", "def create_commit(self, event_data_yaml):\n os.chdir(str(self.repository_path))\n sh.git.checkout(self.branch)\n sh.git.add(self.event_dir)\n message_body = (\n '\\n\\nEvent config:\\n~~~yaml\\n{}\\n~~~\\n'.format(event_data_yaml)\n + '\\nScraped with [pyvideo_scrape]'\n + '(https://github.com/pyvideo/pyvideo_scrape)')\n if self.minimal_download:\n message = ('Minimal download: '\n + '{}\\n\\nMinimal download executed for #{}'.format(\n self.title, self.issue)\n + '\\n\\nOnly data that needs [no review](https://'\n + 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.'\n + '\\nThis event needs further scraping and human '\n + 'reviewing for the description and other data to show.'\n + message_body)\n sh.git.commit('-m', message)\n sh.git.push('--set-upstream', 'origin', self.branch)\n # ~ sh.git.push('--set-upstream', '--force', 'origin', self.branch)\n sh.git.checkout('master')\n else:\n message = (\n 'Scraped {}\\n\\nFixes #{}'.format(self.branch, self.issue)\n + message_body)\n sh.git.commit('-m', message)\n sh.git.checkout('master')\n logger.debug('Conference {} commited', self.branch)", "async def get_participants(self):\n for i in range(self.num):\n def check(m):\n if m.content.lower().strip() == \"i\" and m.author not in self.participants:\n return True\n\n return False\n\n # Wait with a timeout of 2 minutes and check each message with check(m)\n reply = await client.wait_for_message(timeout=120, channel=self.channel, check=check)\n\n if reply: # A user replied with a valid check\n asyncio.ensure_future(\n client.say(self.message,\n \"{} has entered! `{}/{}`. 
Type `I` to join!\".format(\n reply.author.mention, i + 1, self.num))\n )\n self.participants.append(reply.author)\n\n # Remove the message if bot has permissions\n if self.member.permissions_in(self.channel).manage_messages:\n asyncio.ensure_future(client.delete_message(reply))\n else:\n # At this point we got no reply in time and thus, gathering participants failed\n await client.say(self.message, \"**The {} game failed to gather {} participants.**\".format(\n self.name, self.num))\n started.pop(started.index(self.channel.id))\n\n return False\n\n return True", "def test_users_getting_add_peer_event(self) -> None:\n streams_to_sub = [\"multi_user_stream\"]\n othello = self.example_user(\"othello\")\n cordelia = self.example_user(\"cordelia\")\n iago = self.example_user(\"iago\")\n orig_user_ids_to_subscribe = [self.test_user.id, othello.id]\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(orig_user_ids_to_subscribe).decode()),\n )\n\n new_user_ids_to_subscribe = [iago.id, cordelia.id]\n with self.capture_send_event_calls(expected_num_events=5) as events:\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(new_user_ids_to_subscribe).decode()),\n )\n\n add_peer_events = [event for event in events if event[\"event\"].get(\"op\") == \"peer_add\"]\n (add_peer_event,) = add_peer_events\n\n self.assertEqual(add_peer_event[\"event\"][\"type\"], \"subscription\")\n self.assertEqual(add_peer_event[\"event\"][\"op\"], \"peer_add\")\n event_sent_to_ids = add_peer_event[\"users\"]\n for user_id in new_user_ids_to_subscribe:\n # Make sure new users subscribed to stream is not in\n # peer_add event recipient list\n self.assertNotIn(user_id, event_sent_to_ids)\n for old_user in orig_user_ids_to_subscribe:\n # Check non-new users are in peer_add event recipient list.\n self.assertIn(old_user, event_sent_to_ids)" ]
[ "0.5298337", "0.5100352", "0.49935168", "0.49894413", "0.4963217", "0.49325606", "0.49297416", "0.48516214", "0.48137006", "0.48101404", "0.47957727", "0.47957727", "0.47882983", "0.47866845", "0.4747633", "0.47383985", "0.47283602", "0.46981072", "0.4695059", "0.46948302", "0.46834564", "0.45978653", "0.45901513", "0.45882165", "0.4588152", "0.45868716", "0.4582993", "0.45403197", "0.45310643", "0.45178393" ]
0.69607586
0
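
A hedged call-site sketch for the `save_committees` row above. The `event` and `committees` values are assumptions; the row only requires that each committee expose `.name` and an `.id` matching an existing `Organization` primary key.

# Hypothetical usage — the upstream event/committee objects are placeholders.
save_committees(event, committees)

# Each committee now appears as an organization-typed participant of the event:
EventParticipant.objects.filter(event=event, entity_type="organization")
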
Derive a secure Fernet key from arbitrary input password.
def derive_fernet_key(password, salt): kdf = pbkdf2.PBKDF2HMAC( algorithm=hashes.SHA256(), length=32, salt=encoding.force_bytes(salt), iterations=100000, backend=default_backend() ) return base64.urlsafe_b64encode(kdf.derive( encoding.force_bytes(password)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_key(password, iterations=ITERATIONS):\n key = PBKDF2(password, SALT, dkLen=KEY_LENGTH_BYTES, count=iterations)\n return key", "def get_fernet_key(app: Sanic, passphrase: str) -> bytes:\n salted = (passphrase + app.secret_key).encode()\n key = hashlib.sha256(salted).digest()[:32]\n return base64.urlsafe_b64encode(key)", "def generate_key_from_password(pwd, salt=None):\n # https://cryptography.io/en/latest/fernet/#using-passwords-with-fernet\n password = base64.urlsafe_b64encode(pwd.encode())\n if salt is None:\n salt = os.urandom(16)\n kdf = PBKDF2HMAC(\n algorithm=hashes.SHA256(),\n length=32,\n salt=salt,\n iterations=settings.HASH_ITERATIONS,\n backend=default_backend()\n )\n key = base64.urlsafe_b64encode(kdf.derive(password))\n return key", "def gen_Fernet_key():\n\tkey = Fernet.generate_key()\n\treturn key", "def password_to_key(password: str):\r\n curve = ec.SECP256R1() # Elliptic curve\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(password.encode())\r\n password_int = int.from_bytes(digest.finalize(), \"big\")\r\n return ec.derive_private_key(password_int, curve)", "def generate_secret_key():\n return b64encode(Fernet.generate_key()).decode('utf-8')", "def _derive_key(\n self, passphrase: str, otp: YubikeyOTP, *args : bytes\n ) -> bytes:\n return self._context_kdf.derive(\n combine_keys(\n passphrase.encode('utf-8'),\n otp.token.private_uid,\n *args\n )\n )", "def create_crypt_key():\n\n crypt_key = Fernet.generate_key() # key is type = bytes\n\n crypt_query = 'INSERT INTO Crypt (crypt_key) VALUES (%s)'\n my_cursor.execute(crypt_query, (crypt_key,))\n pw_db.commit()", "def decrypt_password(pass_to_decrypt):\n\n pass_to_decrypt = fk.decrypt(pass_to_decrypt)\n return pass_to_decrypt.decode()", "def generate_key():\n key = Fernet.generate_key()\n with open(\"pass.key\", \"wb\") as key_file:\n key_file.write(key)", "def generate_symmetric_key():\n return Fernet.generate_key()", "def any_text_to_fernet_key(self, text):\n md5 = fingerprint.fingerprint.of_text(text)\n fernet_key = base64.b64encode(md5.encode(\"utf-8\"))\n return fernet_key", "def derive_key(shared_key,algorithm):\n\tif algorithm == 'SHA256':\n\t\talgorithm = hashes.SHA256()\n\tif algorithm == 'SHA512':\n\t\talgorithm = hashes.SHA512()\n\tderived_key = HKDF(\n\t\t\t\talgorithm = algorithm,\n\t\t\t\tlength = 32,\n\t\t\t\tsalt = None,\n\t\t\t\tinfo=b'handshake data',\n\t\t\t\tbackend = backend\n\t\t\t).derive(shared_key)\n\treturn derived_key", "def fernet_encript(key,message):\n\tf = Fernet(key)\n\treturn f.encrypt(message)", "def dblencrypt(input, fromkey, tokey):\n return encrypt(unencrypt(input, fromkey), tokey)", "def _create_fernet_key(self) -> str:\n\n client = boto3.client(\"ssm\", endpoint_url=os.environ.get(\"AWS_ENDPOINT\"))\n\n try:\n response = client.get_parameter(Name=self.object_name, WithDecryption=True)\n return response[\"Parameter\"][\"Value\"]\n except client.exceptions.ParameterNotFound:\n return Fernet.generate_key().decode()", "def scrypt(salt: bytes, N: int, password: bytes) -> bytes:\n kdf = Scrypt(salt=salt, length=32, n=N, r=8, p=1, backend=default_backend())\n return kdf.derive(password)", "def generate_key(self):\n\n self.key = Fernet.generate_key()\n self.cryptor = Fernet(self.key)", "def encrypt_password(password: str) -> str:\n return pwd_context.hash(password)", "def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)", "def get_key(name):\n import os\n salt = os.urandom(16)\n name = name.encode()\n from 
cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\n from cryptography.hazmat.primitives import hashes\n from cryptography.hazmat.backends import default_backend\n kdf = PBKDF2HMAC(algorithm=hashes.SHA256(),\n length=32,\n salt=salt,\n iterations=100000,\n backend=default_backend())\n import base64\n key = base64.urlsafe_b64encode(kdf.derive(name))\n return key", "def passwd_decryption(self):\n with open(self.key_path, 'rb') as input_key:\n for line in input_key:\n key = line\n with open(self.pass_path, 'rb') as input_password:\n for line in input_password:\n password = line\n cipher_suit = Fernet(key)\n plain_password = cipher_suit.decrypt(password)\n plain_password = bytes(plain_password).decode('utf-8')\n \n return plain_password", "def password_encryption(self, password):\n return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())", "def generate_password(plain_password, salt):\n return crypt(plain_password, \"$6$%s\" % salt)", "def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)", "def hashPassword(self, password):\n key = hashlib.pbkdf2_hmac(\n 'sha256',\n str.encode(password),\n self.salt,\n 100000\n )\n return key", "def recover_encrypt_pass(self):\n with open(self.key_path) as input_file:\n key = input_file.readlines()\n cipher_suite = Fernet(key[0])\n bin_passwd = bytes(self.password, 'utf-8')\n ciphered_text = cipher_suite.encrypt(bin_passwd)\n return ciphered_text", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def encrypt_password(cls, password):\n return generate_password_hash(password)", "def generate_key():\n key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(key)" ]
[ "0.6870849", "0.65315944", "0.647991", "0.6411447", "0.6401939", "0.6350924", "0.6326737", "0.6111555", "0.6109095", "0.6086658", "0.60474956", "0.60229516", "0.59270436", "0.5917966", "0.5900764", "0.5893523", "0.5871855", "0.5840717", "0.58245474", "0.5807514", "0.57842696", "0.57558787", "0.57389504", "0.5730108", "0.57119733", "0.57069165", "0.57012266", "0.5695262", "0.56863666", "0.5679184" ]
0.7723482
0
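
The `derive_fernet_key` row above returns url-safe base64 of a 32-byte PBKDF2 output, which is exactly the key format `cryptography`'s `Fernet` accepts, so it slots straight into symmetric encryption. A small sketch assuming the function and its imports are in scope; the password, salt, and payload are invented for illustration.

from cryptography.fernet import Fernet

# The salt must be stored alongside the ciphertext and reused on decryption;
# this literal is only an example value.
key = derive_fernet_key("correct horse battery staple", "per-record-salt")

f = Fernet(key)
token = f.encrypt(b"sensitive payload")
assert f.decrypt(token) == b"sensitive payload"
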
If we get a response, add the node to the routing table. If we get no response, make sure it's removed from the routing table.
def handleCallResponse(self, result, node): if result[0]: self.log.info("got response from %s, adding to router" % node) _log.debug("got response from %s, adding to router" % node) if self.router.isNewNode(node): self.transferKeyValues(node) self.router.addContact(node) else: self.log.debug("no response from %s, removing from router" % node) _log.debug("no response from %s, removing from router" % node) self.router.removeContact(node) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def update_routing_table(self, node_id: Optional[DHTID], peer_id: PeerID, responded=True):\n node_id = node_id if node_id is not None else self.routing_table.get(peer_id=peer_id)\n if responded: # incoming request or outgoing request with response\n if node_id not in self.routing_table:\n # we just met a new node, maybe we know some values that it *should* store\n data_to_send: List[Tuple[DHTID, BinaryDHTValue, DHTExpiration]] = []\n for key, item in list(self.storage.items()):\n neighbors = self.routing_table.get_nearest_neighbors(key, self.num_replicas, exclude=self.node_id)\n if neighbors:\n nearest_distance = neighbors[0][0].xor_distance(key)\n farthest_distance = neighbors[-1][0].xor_distance(key)\n new_node_should_store = node_id.xor_distance(key) < farthest_distance\n this_node_is_responsible = self.node_id.xor_distance(key) < nearest_distance\n if not neighbors or (new_node_should_store and this_node_is_responsible):\n data_to_send.append((key, item.value, item.expiration_time))\n if data_to_send:\n asyncio.create_task(self.call_store(peer_id, *zip(*data_to_send), in_cache=False))\n\n maybe_node_to_ping = self.routing_table.add_or_update_node(node_id, peer_id)\n if maybe_node_to_ping is not None:\n # we couldn't add new node because the table was full. Check if existing peers are alive (Section 2.2)\n # ping one least-recently updated peer: if it won't respond, remove it from the table, else update it\n asyncio.create_task(self.call_ping(maybe_node_to_ping[1])) # [1]-th element is that node's peer_id\n\n else: # we sent outgoing request and peer did not respond\n if node_id is not None and node_id in self.routing_table:\n del self.routing_table[node_id]", "def addNode(self, node: dht.node.Node):\n \n bucket = self._findBucket(node)\n if bucket == None:\n raise Exception(\"Found no bucket for given id\")\n \n if not node in bucket:\n # We do not have this node on our routing table yet;\n # attempt to add it.\n if len(bucket) < MAX_NODES_PER_BUCKET:\n bucket.append(node)\n else:\n if bucket.inRange(myID):\n # Our own node's ID is in the appropriate bucket's range,\n # split the bucket and recursively attempt to add the node.\n self._splitBucket(bucket)\n self.addNode(node)\n else:\n # TODO: handle this\n pass", "def welcomeIfNewNode(self, node):\n if not self.router.isNewNode(node):\n return\n\n log.info(\"never seen %s before, adding to router\", node)\n for key, value in self.storage.items():\n keynode = Node(digest(key))\n neighbors = self.router.findNeighbors(keynode)\n if len(neighbors) > 0:\n last = neighbors[-1].distanceTo(keynode)\n newNodeClose = node.distanceTo(keynode) < last\n first = neighbors[0].distanceTo(keynode)\n thisNodeClosest = self.sourceNode.distanceTo(keynode) < first\n if len(neighbors) == 0 or (newNodeClose and thisNodeClosest):\n values_to_republish = []\n\n try:\n parsed_val = json.loads(value)\n if isinstance(parsed_val, list):\n [values_to_republish.append(json.dumps(val)) for val in parsed_val]\n else:\n values_to_republish.append(value)\n\n for val in values_to_republish:\n asyncio.ensure_future(self.callStore(node, key, val))\n\n except Exception as ex:\n log.exception(ex)\n continue\n\n self.router.addContact(node)", "def add_node(self, node):\n if node not in self.nodes:\n self.nodes.append(node)", "def welcome_if_new(self, node):\n if not self.router.is_new_node(node):\n return\n\n self.log(\"never seen %s before, adding to router\" % node)\n #for key, value in self.storage:\n for key in self.storage.keys():\n value = self.storage[key]\n 
keynode = Node(digest(key))\n neighbors = self.router.find_neighbors(keynode)\n if neighbors:\n last = neighbors[-1].distance_to(keynode)\n new_node_close = node.distance_to(keynode) < last\n first = neighbors[0].distance_to(keynode)\n this_closest = self.source_node.distance_to(keynode) < first\n if not neighbors or (new_node_close and this_closest):\n asyncio.ensure_future(self.call_store(node, key, value))\n self.router.add_contact(node)", "def test_handle_response_nodes_do_not_update_nearest_node(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n lookup._lookup = mock.MagicMock()\n old_nearest_node = lookup.nearest_node\n uuids = [uuid for uuid in lookup.pending_requests.keys()]\n uuid = uuids[0]\n contact = lookup.shortlist[0]\n shortlist = tuple([(p.public_key, p.version, p.uri) for p\n in lookup.shortlist])\n msg = Nodes(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal, shortlist)\n response = asyncio.Future()\n response.set_result(msg)\n lookup._handle_response(uuid, contact, response)\n self.assertEqual(lookup.nearest_node, old_nearest_node)\n self.assertEqual(lookup.nearest_node, lookup.shortlist[0])\n self.assertEqual(lookup._lookup.call_count, 0)", "def add_node(self, node):\n \n if node in self.node_set:\n return \n \n self.num_node = self.num_node + 1\n self.node_set.add(node)\n self.prefix[node] = {}\n self.suffix[node] = {}", "def AddNode(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def add_node(self, node):\n if node not in self.nodes:\n self._nodes.append(node)", "def addNode(self, node):\n if node in self.edges:\n raise ValueError('Duplicate node')\n else:\n self.edges[node] = []", "def test_handle_response_nodes_update_nearest_node(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n lookup._lookup = mock.MagicMock()\n old_nearest_node = lookup.nearest_node\n uuids = [uuid for uuid in lookup.pending_requests.keys()]\n uuid = uuids[0]\n contact = lookup.shortlist[0]\n msg = Nodes(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal,\n self.remote_nodes)\n response = asyncio.Future()\n response.set_result(msg)\n lookup._handle_response(uuid, contact, response)\n self.assertNotEqual(lookup.nearest_node, old_nearest_node)\n self.assertEqual(lookup.nearest_node, lookup.shortlist[0])\n self.assertEqual(lookup._lookup.call_count, 1)", "def handleCallResponse(self, result, node):\n if not result[0]:\n log.warning(\"no response from %s, removing from router\", node)\n self.router.removeContact(node)\n return result\n\n log.info(\"got successful response from %s\", node)\n self.welcomeIfNewNode(node)\n return result", "def add_node(self, node: Node):\n if node not in self.__graph_dict:\n self.__graph_dict[node] = []", "def insert(self, path, handler=None):\n if path in self.children: # Return None if path already exists\n print('Warning: Path already exists!')\n return\n else:\n self.children[path] = RouteTrieNode(handler) # Add character if it doesn't exist", "def handle_call_response(self, result, node):\n if not result[0]:\n self.log(\"!! 
no response from %s, removing from router\", node)\n self.router.remove_contact(node)\n return result\n\n self.log(\"got successful response from %s\" % node)\n self.welcome_if_new(node)\n return result", "def add_node(self, node):", "def insert(self, path_step, handler):\n if path_step not in self.children:\n self.children[path_step] = RouteTrieNode(handler)", "def add_node(self, node):\r\n self.undeclared_nodes.append(node)", "def test_handle_response_nodes_adds_closest_nodes_to_shortlist(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n uuids = [uuid for uuid in lookup.pending_requests.keys()]\n uuid = uuids[0]\n contact = lookup.shortlist[0]\n msg = Nodes(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal,\n self.remote_nodes)\n response = asyncio.Future()\n response.set_result(msg)\n self.assertNotEqual(lookup.shortlist, list(self.nodes))\n lookup._handle_response(uuid, contact, response)\n self.assertEqual(lookup.shortlist, list(self.nodes))", "def addNode(self, newNode):\n if newNode not in self.graph.keys():\n self.graph[newNode] = []", "def add(self, node):\n self.steps += 1\n self.path.append(node)\n self.visited.add(node)\n if node in self.targets[0]:\n self.targets[0].remove(node)", "def store_response(self, new_response):\n self.responses.append(new_response)", "def add_node(self, node, parent):\n if node not in self.map.edges:\n self.map.edges[node] = []\n if parent not in self.map.edges:\n self.map.edges[parent] = [node]\n else:\n self.map.edges[parent].append(node)", "def add_node(self, node):\n self.nodes.append(node)\n self.edges[node.identifier] = {}\n self._id2node[node.identifier] = node\n node.parent = None", "def add_node(self, node):\n self._nodes[node.id] = node\n self._clear_cache()", "def add(self, node):\n if str(node.getPosition()) in self._history:\n # duplicate entry\n return\n self._history[str(node.getPosition())] = True\n self._insort(node)", "def node_no_route_found_for_packet(self, node, packet):\n for subscriber in self.subscribers:\n subscriber.node_no_route_found_for_packet(node, packet)", "def add_node(self, node: Node) -> None:\n\t\t# Check for conflicts with current nodes; iterate over nodes\n\t\tfor index in range(len(self.nodes)):\n\t\t\t# Exit if comparison fails. Node can update itself from the compare() method\n\t\t\tif not self.nodes[index].compare(node):\n\t\t\t\treturn\n\n\t\t# Add the Node if no conflicts\n\t\tself.nodes.append(node)", "def add_node(self, val):\n if val in self._g:\n raise ValueError('Node already exists.')\n self._g[val] = []", "def add_node(self, node):\n if node in self.edges:\n raise ValueError('Duplicate node')\n else:\n self.edges[node]=[]\n self.nodes.add(node)" ]
[ "0.64593494", "0.5994604", "0.5895253", "0.58183753", "0.5763807", "0.5725724", "0.5684319", "0.5683615", "0.56009513", "0.5568529", "0.5514875", "0.55026144", "0.5476952", "0.5465007", "0.5445385", "0.5436398", "0.54311246", "0.53875333", "0.5359572", "0.5351444", "0.5330344", "0.5326482", "0.5312679", "0.52846396", "0.5282091", "0.5279393", "0.52599", "0.52450466", "0.5243389", "0.5237762" ]
0.65867233
0
Given a new node, send it all the keys/values it should be storing.
def transferKeyValues(self, node): _log.debug("**** transfer key values %s ****" % node) ds = [] for key, value in self.storage.iteritems(): keynode = Node(digest(key)) neighbors = self.router.findNeighbors(keynode) _log.debug("transfer? nbr neighbors=%d, key=%s, value=%s" % (len(neighbors), base64.b64encode(key), str(value))) if len(neighbors) > 0: newNodeClose = node.distanceTo(keynode) < neighbors[-1].distanceTo(keynode) thisNodeClosest = self.sourceNode.distanceTo(keynode) < neighbors[0].distanceTo(keynode) if len(neighbors) == 0 or (newNodeClose and thisNodeClosest): if key in self.set_keys: _log.debug("transfer append key value key=%s, value=%s" % (base64.b64encode(key), str(value))) ds.append(self.callAppend(node, key, value)) else: _log.debug("transfer store key value key=%s, value=%s" % (base64.b64encode(key), str(value))) ds.append(self.callStore(node, key, value)) return defer.gatherResults(ds)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def welcomeIfNewNode(self, node):\n if not self.router.isNewNode(node):\n return\n\n log.info(\"never seen %s before, adding to router\", node)\n for key, value in self.storage.items():\n keynode = Node(digest(key))\n neighbors = self.router.findNeighbors(keynode)\n if len(neighbors) > 0:\n last = neighbors[-1].distanceTo(keynode)\n newNodeClose = node.distanceTo(keynode) < last\n first = neighbors[0].distanceTo(keynode)\n thisNodeClosest = self.sourceNode.distanceTo(keynode) < first\n if len(neighbors) == 0 or (newNodeClose and thisNodeClosest):\n values_to_republish = []\n\n try:\n parsed_val = json.loads(value)\n if isinstance(parsed_val, list):\n [values_to_republish.append(json.dumps(val)) for val in parsed_val]\n else:\n values_to_republish.append(value)\n\n for val in values_to_republish:\n asyncio.ensure_future(self.callStore(node, key, val))\n\n except Exception as ex:\n log.exception(ex)\n continue\n\n self.router.addContact(node)", "def _put(self, key, value, current_node):\n pass", "def store_node(self, node):\n if not self.light.hasAttr(self.message_storage_attr_name):\n pm.addAttr(\n self.light,\n ln=self.message_storage_attr_name,\n m=1\n )\n\n node.message >> self.light.attr(self.message_storage_attr_name).next_available", "def welcome_if_new(self, node):\n if not self.router.is_new_node(node):\n return\n\n self.log(\"never seen %s before, adding to router\" % node)\n #for key, value in self.storage:\n for key in self.storage.keys():\n value = self.storage[key]\n keynode = Node(digest(key))\n neighbors = self.router.find_neighbors(keynode)\n if neighbors:\n last = neighbors[-1].distance_to(keynode)\n new_node_close = node.distance_to(keynode) < last\n first = neighbors[0].distance_to(keynode)\n this_closest = self.source_node.distance_to(keynode) < first\n if not neighbors or (new_node_close and this_closest):\n asyncio.ensure_future(self.call_store(node, key, value))\n self.router.add_contact(node)", "def test_newkey(self):\n d = {\n \"action\": \"set\",\n \"node\": {\n \"expiration\": \"2013-09-14T00:56:59.316195568+02:00\",\n \"modifiedIndex\": 183,\n \"key\": u(\"/testkey\"),\n \"ttl\": 19,\n \"value\": \"test0\",\n },\n }\n\n res = self.client.put(d[\"node\"][\"key\"], d[\"node\"][\"value\"])\n zeroth = res.header.revision\n d[\"node\"][\"value\"] = \"test1\"\n res = self.client.put(d[\"node\"][\"key\"], d[\"node\"][\"value\"])\n self.assertEqual(zeroth + 1, res.header.revision)\n self.assertEqual(self.client.get(d[\"node\"][\"key\"])[0], b(d[\"node\"][\"value\"]))", "def add_node(self, node):\n try:\n self.dict.setdefault(node, OrderedDict())\n except (AttributeError, TypeError):\n raise \"Node Value must be hashable value\"", "def test_0_put(self):\n self.assertIsNotNone(save_node_info(self.node.name, self.node))", "def append(self, key, value):\n dkey = digest(key)\n node = Node(dkey)\n\n def append_(nodes):\n # if this node is close too, then store here as well\n if not nodes or self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):\n try:\n pvalue = json.loads(value)\n self.set_keys.add(dkey)\n if dkey not in self.storage:\n _log.debug(\"%s local append key: %s not in storage set value: %s\" % (base64.b64encode(node.id), base64.b64encode(dkey), pvalue))\n self.storage[dkey] = value\n else:\n old_value_ = self.storage[dkey]\n try:\n old_value = json.loads(old_value_)\n new_value = list(set(old_value + pvalue))\n except:\n # When the key have been used for single values it does not contain a list\n # When have been deleted contains None\n # Just 
replace old value\n new_value = pvalue\n old_value = old_value_\n _log.debug(\"%s local append key: %s old: %s add: %s new: %s\" % (base64.b64encode(node.id), base64.b64encode(dkey), old_value, pvalue, new_value))\n self.storage[dkey] = json.dumps(new_value)\n except:\n _log.debug(\"Trying to append something not a JSON coded list %s\" % value, exc_info=True)\n ds = [self.protocol.callAppend(n, dkey, value) for n in nodes]\n return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)\n\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n self.log.warning(\"There are no known neighbors to set key %s\" % key)\n _log.debug(\"There are no known neighbors to set key %s\" % key)\n return defer.succeed(False)\n\n spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n return spider.find().addCallback(append_)", "def save_node(self, node: Union[dict, Node]) -> Node:", "def write_new_nodes(self, nodes):\n\n if len(self._cache) > self._cache_max_size:\n # The size of the cache has exceeded the threshold. Discard the\n # old cache values (but still store the new nodes into the\n # cache):\n logger.debug('Clearing node cache')\n self._cache.clear()\n\n data = {}\n max_node_id = 0\n for node in nodes:\n max_node_id = max(max_node_id, node.id)\n data[node.id] = self._dump(node._entries)\n self._cache[node.id] = node._entries\n\n self.db[len(self._max_node_ids)] = data\n\n if max_node_id == 0:\n # Rewrite last value:\n self._max_node_ids.append(self._max_node_ids[-1])\n else:\n self._max_node_ids.append(max_node_id)", "def push(self, nodename: str, key: str, val) -> bool:\n if nodename in self._d:\n self._d[nodename][0][key] = (val, time.time())\n return True\n else:\n return False", "def copy_node(self, from_: str, to_: str):\n self._nodes[to_] = dict(self._nodes[from_])", "def add_node(old_node_dict, old_to_new_node_ids_dict, new_accession, new_db_api, aliases):\n\n # getting the old node id, and the old node's properties\n old_node_id = old_node_dict[\"id\"]\n old_node_alt_accession = old_node_dict[\"alt_accession\"]\n old_node_name = old_node_dict[\"name\"]\n tax_id = old_node_dict[\"tax_id\"]\n pathways = old_node_dict[\"pathways\"]\n\n if aliases:\n aliases += \"|\" + old_node_dict[\"name\"]\n else:\n aliases = old_node_dict[\"name\"]\n\n if old_node_dict[\"aliases\"]:\n aliases += \"|\" + old_node_dict[\"aliases\"]\n\n new_node_dict = {\n \"name\" : new_accession,\n \"alt_accession\" : old_node_alt_accession,\n \"tax_id\" : tax_id,\n \"pathways\" : pathways,\n \"aliases\" : aliases,\n \"topology\": \"\"\n }\n\n # inserting the node to the PSI-MI SQLite\n new_db_api.insert_unique_node(new_node_dict)\n new_node_dict['id'] = new_db_api.last_row_id\n # getting the new last row id of the inserted node\n new_node_id = new_node_dict['id']\n\n # if the node maps to more than one swissprot uniprot id it will be inserted for every swissprot id and\n # this function will be called for every insertion\n if not old_to_new_node_ids_dict.has_key(old_node_id):\n old_to_new_node_ids_dict[old_node_id] = [new_node_id]\n else:\n old_to_new_node_ids_dict[old_node_id].append(new_node_id)", "def nodes(self, nodes):\n global g_npoints\n for osmid, tags, (lng, lat) in nodes:\n if 'name' in tags:\n\n # Build a synthetic value by copying the tags and\n # adding osmid, latitude and longitude.\n valobj = tags.copy()\n valobj['osmid'] = osmid\n valobj['latitude'] = lat\n valobj['longitude'] = lng\n valstr = json.dumps(valobj)\n\n # Construct a GeoJSON bin value to be 
indexed.\n locobj = { 'type': \"Point\", 'coordinates': [ lng, lat ] }\n locgeo = aerospike.GeoJSON(locobj)\n\n # Make a hash of the id to use for random selection.\n hshval = self.id_to_hash(osmid)\n\n key = (self.args.nspace, self.args.set, osmid)\n \n self.client.put(key, { VALBIN: valstr,\n LOCBIN: locgeo,\n MAPBIN: valobj,\n HSHBIN: hshval },\n policy={ 'timeout': 10000,\n 'retry': 10 })\n\n self.npoints += 1\n if self.npoints % 1000 == 0:\n sys.stderr.write('.')", "def insert_node(self, node_tup):\n signature = hashlib.sha256((node_tup[0]+node_tup[4]).encode('utf-8')).hexdigest()\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"INSERT INTO nodes VALUES (:ip, :port, uname, :signature)\", {\"ip\":node_tup[0], \"port\":node_tup[1], \"uname\":node_tup[2], \"verifying_key\":node_tup[3]})\n app_process.commit()\n app_process.close()", "def save_node(self, node: Node):", "def store(self, key, headers, value):", "def __setitem__(self, nodename, node):\n\n for hash_ in self._repl_iterator(nodename):\n if hash_ in self._nodes:\n raise ValueError(\"Node name %r is \"\n \"already present\" % nodename)\n self._nodes[hash_] = node\n bisect.insort(self._keys, hash_)", "def __setitem__(self, nodename, node):\n\n for hash_ in self._repl_iterator(nodename):\n if hash_ in self._nodes:\n raise ValueError(\"Node name %r is \"\n \"already present\" % nodename)\n self._nodes[hash_] = node\n bisect.insort(self._keys, hash_)", "def setNodeSubscriptionKey(self,node,key):\n post_data = {'key': str(key)}\n data = self.connect('put',\"nodes/%s/subscription\" % (node), post_data)\n return data", "def add_node(self, n):\r\n keys = self.d.keys()\r\n #check for node in graph\r\n if n not in keys:\r\n self.d.update({str(n): set()})", "def store(self, key, value):\n pass", "def add_node(self, node):", "def send_node_props(self, host_info):\n se = get_se()\n version = get_version()\n name = host_info.get_hostname()\n unique_id = '%s:Pool:%s' % (se, name)\n parent_id = \"%s:SE:%s\" % (se, se)\n\n sa = StorageElement.StorageElement()\n sar = StorageElementRecord.StorageElementRecord()\n sa.UniqueID(unique_id)\n sa.Name(name)\n sa.SE(se)\n sa.SpaceType(\"Pool\")\n sa.Implementation(XRD_NAME)\n sa.Version(version)\n sa.Status(XRD_STATUS)\n sa.ParentID(parent_id)\n sa.Timestamp(timestamp)\n sar.Timestamp(timestamp)\n sar.UniqueID(unique_id)\n sar.MeasurementType(\"raw\")\n sar.StorageType(\"disk\")\n sar.TotalSpace(1024*host_info.get_total_kb())\n sar.FreeSpace(1024*host_info.get_total_free_kb())\n sar.UsedSpace(1024*host_info.get_total_used_kb())\n Gratia.Send(sa)\n Gratia.Send(sar)", "def post(self):\n node_id = blockchain.register_node(request.host)\n\n return {\n 'message': 'New node have been added.',\n 'node_id': node_id,\n 'nodes': list(blockchain.nodes)\n }, 201", "def qnode_keys(self, qnode_keys):\n\n self._qnode_keys = qnode_keys", "def __setitem__(self,key,value):\n assert isinstance(key,int)\n if isinstance(value,str):\n super().__setitem__(key,Node(key,value))\n else:\n assert value.nodeid == key\n super().__setitem__(key,value)", "def add_node(self, n):\n self.node_dict.setdefault(n, OrderedDict())", "def add_node (self, node):\n self.network.add_node(node.id)\n self.network.node[node.id] = node", "def update_node_attribute(\n self, node: str, attr_key: str, attr_value: Any, preserve: bool = False\n ) -> Dict:\n node_data = self.graph.nodes[node]\n updated = prepare_data_dict(\n node_data, {attr_key: 
attr_value}, preserve=preserve\n )\n self.graph.add_node(node, **updated)\n return updated" ]
[ "0.68433034", "0.6387898", "0.6231363", "0.6224902", "0.6179758", "0.5944865", "0.58915156", "0.5804389", "0.57851076", "0.57538104", "0.5727421", "0.57156307", "0.57064646", "0.5705826", "0.5686158", "0.5680778", "0.563601", "0.5572478", "0.5572478", "0.5489811", "0.5487325", "0.54851115", "0.5467798", "0.5458484", "0.54430634", "0.54224133", "0.5414832", "0.53423345", "0.53253156", "0.5324348" ]
0.66873896
1
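transferKeyValues() in the record above hands a stored key over to a newly seen node only when that node is closer to the key than the farthest of the current neighbours and this node is itself the closest holder. Below is a minimal, self-contained sketch of that decision using the Kademlia XOR metric; the integer ids and function names are illustrative, not taken from the original module.

def distance(id_a, id_b):
    # Kademlia metric: XOR of two node/key ids (small ints used here for clarity).
    return id_a ^ id_b

def should_transfer(key_id, new_node_id, this_node_id, neighbor_ids):
    if not neighbor_ids:
        return True                      # no known neighbours: always hand over
    farthest = max(distance(n, key_id) for n in neighbor_ids)
    nearest = min(distance(n, key_id) for n in neighbor_ids)
    new_node_close = distance(new_node_id, key_id) < farthest
    this_node_closest = distance(this_node_id, key_id) < nearest
    return new_node_close and this_node_closest

# New node 0b0011 is close enough to key 0b0010, and this node (0b0001) is the
# closest current holder, so the key/value pair would be transferred.
print(should_transfer(0b0010, 0b0011, 0b0001, [0b0100, 0b1000]))   # True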
Bootstrap the server by connecting to other known nodes in the network.
def bootstrap(self, addrs): # if the transport hasn't been initialized yet, wait a second if self.protocol.transport is None: return task.deferLater(reactor, .2, self.bootstrap, addrs) else: _log.debug("AppendServer.bootstrap(%s)" % addrs) return Server.bootstrap(self, addrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def network_bootstrap(self, args):\n pass", "def connect_to_server(self):\n\t\tself.outside.start()\n\t\tself.outside.register(self.config.server_ip, self.config.server_port)\n\n\t\tself.thin.start()\n\t\tself.thin.register(self.config.server_ip, self.config.server_port)", "def bootstrap(self, params = None):\n command = \"\"\n print \"NODE: [%s] running bootstrap scripts\" % self.name\n if self.type == \"SEED\":\n command += get_script_text(\"cassandra_seednode_bootstrap\")\n elif self.type == \"CLIENT\":\n if self.name.endswith('1'):\n command += get_script_text(\"ganglia_endpoint\")\n command += get_script_text(\"cassandra_client_bootstrap\")\n\n else:\n command = get_script_text(\"cassandra_node_bootstrap\")\n timer = Timer.get_timer()\n self.vm.run_command(command, silent=True)\n print \"NODE: %s is now bootstrapped (took %d sec)\" % (self.name, timer.stop())\n self.bootstraped = True", "def connect_to_bootstrap_node(self):\n if not is_ipfs_on():\n start_ipfs_daemon()\n if is_ipfs_on():\n raise IpfsNotConnected\n\n # cmd = [\"ipfs\", \"bootstrap\", \"list\"]\n # output = run(cmd)\n # s = StringIO(output)\n peers = self.client.bootstrap.list()[\"Peers\"]\n peer_address = None\n for peer in peers:\n if re.search(r\"/ip4/\", peer) is not None:\n peer_address = peer\n break\n else:\n return False\n\n print(f\"==> Trying to connect into {peer_address} using swarm connect\")\n output = self.client.swarm.connect(peer_address)\n if (\"connect\" and \"success\") in str(output):\n log(str(output), \"bg\")\n return True\n\n return False", "def setup(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request #TCP socket object for the client\n self.server.clients[(self.ip, self.port)] = self\n self.server.peers.append((self.connection)) \n for client in self.server.clients:\n print(\"Connected client: \", client)\n\n #for peer in self.server.peers:\n # print(\"Peers: \", peer)", "def join_network(self):\n connect_nodes_bi(self.nodes, 1, 2)\n self.sync_all()", "def bootstrap(self):\n\n self.db = connection_manager.get(DbConnection, host=self.ip, port=3306, user=self.user, password=self.password)\n\n self.connected = True", "def startup(self):\n for v in self.virt_nodes:\n v.create()\n \n \"\"\" scan for nodes \"\"\"\n self.scan_for_nodes()\n \n \"\"\" connect to all nodes and call setup \"\"\"\n for n in self.scan_nodes:\n n.connect()\n \n ''' list of open addresses for the node '''\n oalist = []\n \n ''' if the multicast interface is defined use it as open address '''\n if self.mcast_interface != \"\":\n oalist.append(self.mcast_interface)\n \n ''' open the connection to the default address of the slave '''\n oalist.append(socket.gethostbyname(socket.gethostname()))\n \n ''' read the monitor node list '''\n monitor_list = open(os.path.join(self.workdir, \"monitor-nodes.txt\"), \"r\")\n for maddress in monitor_list.readlines():\n oalist.append(maddress.strip())\n \n ''' call the setup procedure '''\n n.setup(oalist)", "def start_peers(self):\n for i in self.nodes:\n i.start()", "def bootstrap_p2p(self, *, num_connections=1):\n for _ in range(num_connections):\n self.nodes[0].add_p2p_connection(P2PDataStore())\n self.nodes[0].p2p.wait_for_verack()", "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def bootstrap(ctrl):\n conf = TorConfig(ctrl)\n conf.post_bootstrap.addCallback(setup_done).addErrback(setup_fail)\n log.msg(\"Tor process connected, bootstrapping ...\")", "def bootstrap(self, addrs):\n # if the transport hasn't 
been initialized yet, wait a second\n if self.protocol.transport is None:\n return task.deferLater(reactor, 1, self.bootstrap, addrs)\n\n def initTable(results):\n nodes = []\n for addr, result in results.items():\n if result[0]:\n nodes.append(Node(result[1], addr[0], addr[1]))\n spider = NodeSpiderCrawl(self.protocol, self.node, nodes, self.ksize, self.alpha)\n return spider.find()\n\n ds = {}\n for addr in addrs:\n ds[addr] = self.protocol.ping(addr, self.node.id)\n d = deferredDict(ds)\n d.addCallback(initTable)\n d.addErrback(self.onError)\n return d", "def start(self, iface='', network='', bootstrap=[], cb=None, name=None, nodeid=None):\n from urlparse import urlparse\n import socket\n _log.info(\"PROXY start\")\n o=urlparse(self.master_uri)\n fqdn = socket.getfqdn(o.hostname)\n self._server_node_name = fqdn.decode('unicode-escape')\n self.node.network.join([self.master_uri],\n callback=CalvinCB(self._start_link_cb, org_cb=cb),\n corresponding_server_node_names=[self._server_node_name])", "def _ready_litnodes(self):\n # Start lit node 0 and open websocket connection\n self.add_litnode()\n self.litnodes[0].args.extend([self.coins[0][\"wallit_code\"], \"127.0.0.1\"])\n self.litnodes[0].start_node()\n self.litnodes[0].add_rpc_connection(\"127.0.0.1\", \"8001\")\n\n # Start lit node 1 and open websocket connection\n self.add_litnode()\n self.litnodes[1].args.extend([\"-rpcport\", \"8002\", self.coins[0][\"wallit_code\"], \"127.0.0.1\"])\n self.litnodes[1].start_node()\n self.litnodes[1].add_rpc_connection(\"127.0.0.1\", \"8002\")\n\n self.log.info(\"Wait until lit nodes are sync'ed\")\n wait_until(lambda: self.litnodes[0].get_height(self.coins[0]['code']) == 500)\n wait_until(lambda: self.litnodes[1].get_height(self.coins[0]['code']) == 500)\n\n self.log.info(\"Connect lit nodes\")\n res = self.litnodes[0].Listen(Port=\"127.0.0.1:10001\")[\"result\"]\n self.litnodes[0].lit_address = res[\"Adr\"] + '@' + res[\"LisIpPorts\"][0]\n\n res = self.litnodes[1].Connect(LNAddr=self.litnodes[0].lit_address)\n assert not res['error']\n\n # Check that litnode0 and litnode1 are connected\n wait_until(lambda: len(self.litnodes[0].ListConnections()['result']['Connections']) == 1)\n assert_equal(len(self.litnodes[1].ListConnections()['result']['Connections']), 1)\n self.log.info(\"lit nodes connected\")", "def connect_to_master():", "def start_servers(self, **kwargs):\n self.cleanup()\n\n # Start up the API and default conductor server\n\n # We start the conductor server first, as the API server config\n # depends on the conductor port - this ordering allows for\n # retrying the launch on a port clash\n self.start_with_retry(self.conductor_server, 'conductor_port', 3,\n **kwargs)\n kwargs['conductor_port'] = self.conductor_server.bind_port\n\n self.start_with_retry(self.api_server, 'api_port', 3, **kwargs)", "def __init__(self, listen_connection, bootstrap_connection = ('router.bittorrent.com', 6881),\n\t\t\tuser_setup = {}, user_router = None):\n\t\tsetup = {'discover_t': 180, 'check_t': 30, 'check_N': 10}\n\t\tsetup.update(user_setup)\n\t\tself._log = logging.getLogger(self.__class__.__name__ + '.%s.%d' % listen_connection)\n\t\tself._log.info('Starting DHT node with bootstrap connection %s:%d' % bootstrap_connection)\n\t\tlisten_connection = (socket.gethostbyname(listen_connection[0]), listen_connection[1])\n\t\t# Generate key for token generation\n\t\tself._token_key = os.urandom(20)\n\t\t# Start KRPC server process and Routing table\n\t\tself._krpc = KRPCPeer(listen_connection, 
self._handle_query)\n\t\tif not user_router:\n\t\t\tuser_router = DHT_Router('%s.%d' % listen_connection, setup)\n\t\tself._nodes = user_router\n\t\tself._node = DHT_Node(listen_connection, os.urandom(20))\n\t\tself._node_lock = threading.RLock()\n\t\t# Start bootstrap process\n\t\ttry:\n\t\t\ttmp = self.ping(bootstrap_connection, sender_id = self._node.id).get_result(timeout = 1)\n\t\texcept Exception:\n\t\t\traise\n\t\t\ttmp = {b'ip': encode_connection(listen_connection), b'r': {b'id': self._node.id}}\n\t\tself._node.connection = decode_connection(tmp[b'ip'])\n\t\tself._bootstrap_node = self._nodes.register_node(bootstrap_connection, tmp[b'r'][b'id'])\n\t\t# BEP #0042 Enable security extension\n\t\tlocal_id = bytearray(self._node.id)\n\t\tbep42_value = encode_uint32(bep42_prefix(self._node.connection[0], local_id[-1], local_id[0]))\n\t\tself._node.set_id(bep42_value[:3] + self._node.id[3:])\n\t\tassert(valid_id(self._node.id, self._node.connection))\n\t\tself._nodes.protect_nodes([self._node.id])\n\n\t\t# Start maintainance threads\n\t\tself._threads = ThreadManager(self._log.getChild('maintainance'))\n\n\t\t# Periodically ping nodes in the routing table\n\t\tdef _check_nodes(N, last_ping = 15 * 60, timeout = 5):\n\t\t\tdef get_unpinged(n):\n\t\t\t\treturn time.time() - n.last_ping > last_ping\n\t\t\tcheck_nodes = list(self._nodes.get_nodes(N, expression = get_unpinged))\n\t\t\tif not check_nodes:\n\t\t\t\treturn\n\t\t\tself._log.debug('Starting cleanup of known nodes')\n\t\t\tnode_result_list = []\n\t\t\tfor node in check_nodes:\n\t\t\t\tnode.last_ping = time.time()\n\t\t\t\tnode_result_list.append((node, node.id, self.ping(node.connection, self._node.id)))\n\t\t\tt_end = time.time() + timeout\n\t\t\tfor (node, node_id, async_result) in node_result_list:\n\t\t\t\tresult = self._eval_dht_response(node, async_result, timeout = max(0, t_end - time.time()))\n\t\t\t\tif result and (node.id != result.get(b'id')): # remove nodes with changing identities\n\t\t\t\t\tself._nodes.remove_node(node, force = True)\n\t\tself._threads.start_continuous_thread(_check_nodes, thread_interval = setup['check_t'], N = setup['check_N'])\n\n\t\t# Try to discover a random node to populate routing table\n\t\tdef _discover_nodes():\n\t\t\tself._log.debug('Starting discovery of random node')\n\t\t\tfor idx, entry in enumerate(self.dht_find_node(os.urandom(20), timeout = 1)):\n\t\t\t\tif idx > 10:\n\t\t\t\t\tbreak\n\t\tself._threads.start_continuous_thread(_discover_nodes, thread_interval = setup['discover_t'])", "def _start_servers(self):\n for user, host, port in self.server_addresses:\n remoteHost = \"%s@%s\" % (user, host)\n logger.info(\"starting remote server %s:%s\", host, port)\n command = (\"cd ~/goaway;\" +\n \"find . 
-name '*.pyc' -delete ;\" +\n \"DEBUG=true goaway/cmdserver.py %s %s %s >> server.std.log 2>&1\" % (\n host,\n port,\n self._config.remote_path,\n ))\n logger.debug(\"Starting server:%s remoteHost with command:%s\" % (remoteHost, command))\n ## subprocess.call blocks, while subprocces.Popen doesn't block.\n sshPopen = subprocess.Popen([\"ssh\", remoteHost, command],\n shell = False, stdout= subprocess.PIPE, stderr = subprocess.PIPE)\n self._start_local_server()", "def _initRemoteMDSConnection(shotno):\n\tconn = _mds.Connection(_pref._HBT_SERVER_ADDRESS+':8003');\n\tconn.openTree('hbtep2', shotno);\n\treturn conn", "def __wait_for_master_ssh( self ):\n for _ in itertools.count( ):\n s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n try:\n s.settimeout( 5 )\n s.connect( ('mesos-master', 22) )\n return\n except socket.error:\n pass\n finally:\n s.close( )", "def Start(self, seed_list: List[str] = None, skip_seeds: bool = False) -> None:\n if not seed_list:\n seed_list = settings.SEED_LIST\n\n logger.debug(\"Starting up nodeleader\")\n if not skip_seeds:\n logger.debug(\"Attempting to connect to seed list...\")\n for bootstrap in seed_list:\n if not is_ip_address(bootstrap):\n host, port = bootstrap.split(':')\n bootstrap = f\"{hostname_to_ip(host)}:{port}\"\n addr = Address(bootstrap)\n self.KNOWN_ADDRS.append(addr)\n self.SetupConnection(addr)\n\n logger.debug(\"Starting up nodeleader: starting peer, mempool, and blockheight check loops\")\n # check in on peers every 10 seconds\n self.start_peer_check_loop()\n self.start_memcheck_loop()\n self.start_blockheight_loop()\n\n if settings.ACCEPT_INCOMING_PEERS and not self.incoming_server_running:\n class OneShotFactory(Factory):\n def __init__(self, leader):\n self.leader = leader\n\n def buildProtocol(self, addr):\n print(f\"building new protocol for addr: {addr}\")\n self.leader.AddKnownAddress(Address(f\"{addr.host}:{addr.port}\"))\n p = NeoNode(incoming_client=True)\n p.factory = self\n return p\n\n def listen_err(err):\n print(f\"Failed start listening server for reason: {err.value}\")\n\n def listen_ok(value):\n self.incoming_server_running = True\n\n logger.debug(f\"Starting up nodeleader: setting up listen server on port: {settings.NODE_PORT}\")\n server_endpoint = TCP4ServerEndpoint(self.reactor, settings.NODE_PORT)\n listenport_deferred = server_endpoint.listen(OneShotFactory(leader=self))\n listenport_deferred.addCallback(listen_ok)\n listenport_deferred.addErrback(listen_err)", "def connect(self):\n\t\tself._entity_server_connection.attempt_connection()", "def connect_to_server(self):\n\n server=os.popen('hostname').read()\n if 'epfl.ch' not in server:\n conn = mds.Connection('tcvdata.epfl.ch')\n conn.openTree('tcv_shot', self.shot)\n self.tree = conn\n print(\"You are in server \"+server+\", so I'll open a connection\")\n else:\n self.tree = mds.Tree('tcv_shot', self.shot)", "def host_bootstrap(args):\n name = args.name\n host = args.host\n port = args.port\n user = args.user\n protocol = args.protocol\n url = args.url\n pool = args.pool\n poolpath = args.poolpath\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n baseconfig.bootstrap(name, host, port, user, protocol, url, pool, poolpath)", "async def __initiate_connection(self):\r\n\r\n chainlink_model = ChainlinkResolver.resolve(self.name)\r\n if chainlink_model is None:\r\n LoggerInterface.error(f'The chainlink {self.name} is not registered yet. 
Register it first!')\r\n return\r\n\r\n self.socket_client.set_callback(self.callback)\r\n self.socket_client.set_using_chainlink(chainlink_model)\r\n await self.socket_client.connect()", "def connect(self):\n\n Log.info(f'Connecting to Kodeventure server at {SERVER_HOST}')\n web.run_app(\n self.aiohttp,\n host=PLAYER_HOST,\n port=PLAYER_PORT,\n ssl_context=self.cert\n )", "def setup_cluster(num_cpus, outdir, verbose, error_profile):\r\n\r\n server_socket = setup_server()\r\n workers, client_socks_and_adrs = setup_workers(\r\n num_cpus, outdir, server_socket,\r\n verbose=verbose,\r\n error_profile=error_profile)\r\n # we don't need the client adresses anywhere, so get rid of them\r\n client_sockets = [sock for sock, addr in client_socks_and_adrs]\r\n\r\n return client_sockets, workers, server_socket", "def _bootup_node(self, conn):\n compose_fname = COMPOSE_FNAME\n exec_plan = self.node_exec_plan.copy()\n while len(exec_plan) > 0:\n container_name = exec_plan.popleft()\n self.__bootup_service(conn, compose_fname, container_name)", "def setup_for_run(self):\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.ip_address, self.port))\n self.server.listen(100)" ]
[ "0.7119504", "0.68823236", "0.6732595", "0.6730273", "0.6507474", "0.6275269", "0.6274168", "0.6267048", "0.62213904", "0.6216114", "0.6149288", "0.61468875", "0.60358363", "0.60321194", "0.60266834", "0.5985796", "0.5937494", "0.59249455", "0.5908087", "0.5892862", "0.5862626", "0.5839461", "0.58353174", "0.57990944", "0.57977915", "0.57917804", "0.578646", "0.57577175", "0.57528436", "0.57325584" ]
0.69549376
1
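bootstrap() in the record above simply re-schedules itself with task.deferLater until the UDP transport exists, then defers to the base class. A self-contained sketch of that retry pattern follows; the Resource class, attribute names and timings are invented for illustration.

from twisted.internet import reactor, task

class Resource(object):
    transport = None                     # becomes non-None once listening starts

    def use(self):
        if self.transport is None:
            # Not ready yet: try again shortly and hand back the chained Deferred.
            return task.deferLater(reactor, 0.2, self.use)
        return "transport ready: %r" % (self.transport,)

def show(result):
    print(result)
    reactor.stop()

res = Resource()
res.use().addCallback(show)
reactor.callLater(0.5, setattr, res, "transport", "udp")   # simulate listen()
reactor.run()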
For the given key, append the given list of values to the set in the network.
def append(self, key, value): dkey = digest(key) node = Node(dkey) def append_(nodes): # if this node is close too, then store here as well if not nodes or self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]): try: pvalue = json.loads(value) self.set_keys.add(dkey) if dkey not in self.storage: _log.debug("%s local append key: %s not in storage set value: %s" % (base64.b64encode(node.id), base64.b64encode(dkey), pvalue)) self.storage[dkey] = value else: old_value_ = self.storage[dkey] try: old_value = json.loads(old_value_) new_value = list(set(old_value + pvalue)) except: # When the key have been used for single values it does not contain a list # When have been deleted contains None # Just replace old value new_value = pvalue old_value = old_value_ _log.debug("%s local append key: %s old: %s add: %s new: %s" % (base64.b64encode(node.id), base64.b64encode(dkey), old_value, pvalue, new_value)) self.storage[dkey] = json.dumps(new_value) except: _log.debug("Trying to append something not a JSON coded list %s" % value, exc_info=True) ds = [self.protocol.callAppend(n, dkey, value) for n in nodes] return defer.DeferredList(ds).addCallback(self._anyRespondSuccess) nearest = self.protocol.router.findNeighbors(node) if len(nearest) == 0: self.log.warning("There are no known neighbors to set key %s" % key) _log.debug("There are no known neighbors to set key %s" % key) return defer.succeed(False) spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha) return spider.find().addCallback(append_)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appendlist(self, key, value):\n self.setlistdefault(key, [])\n dict.__setitem__(self, key, self.getlist(key) + [value])", "def addlist(self, k, v):\n self_insert = self._insert\n values = super(OrderedMultiDict, self).setdefault(k, [])\n for subv in v:\n self_insert(k, subv)\n values.extend(v)", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def __setitem__(self, key, value):\n self.setdefault(key, []).append(value)", "def __setitem__(self, key, value):\r\n self.setdefault(key, []).append(value)", "def append_to(self, key, value):\n self.get_data()[key].append(value.get_data())", "def add(self, key, values):\n self.watchlists[key] = list(enumerate(values))", "def __setitem__(self, key, value) :\n attributeslist = getattr(self.request, \"_%s_attributes\" % self.name)\n for i in range(len(attributeslist)) :\n attribute = attributeslist[i]\n for j in range(len(attribute)) :\n (attrname, attrvalue) = attribute[j]\n if attrname == key :\n attribute[j][1].append(value)\n return\n attribute.append((key, [value]))", "def update(self, list_of_sets):\n for s in list_of_sets:\n self.add(s)", "def _set_neighs_list_only(self, key):\n self._set_neighs_array_lvl1(np.array(key))", "def add_or_update_list_HELPER(data_dict, key, value):\n if key in data_dict:\n data_dict[key].append(value)\n else:\n data_dict[key] = [value]", "def put(self, key, value):\r\n temp = [key, value]\r\n flag = False\r\n for i in range(len(self.lis)):\r\n if self.lis[i][0] == temp[0]:\r\n self.lis[i][1] = temp[1]\r\n flag = True\r\n break\r\n if flag == False:\r\n self.lis.append(temp)", "def __setitem__(self, key, value):\r\n key = self.key(key)\r\n if key in self.data_with_same_key:\r\n self.data_with_same_key[key] += [self.data[key]]\r\n elif key in self.data:\r\n self.data_with_same_key[key] = [self.data[key]]\r\n self.data[key] = value", "def add_by_list_of_keys(dictionary: Dict, key_path: List[Any], value: Any) -> Dict:\n key = key_path[0]\n dictionary[key] = (\n value\n if len(key_path) == 1\n else add_by_list_of_keys(\n dictionary[key] if key in dictionary else dict(),\n key_path[1:],\n value,\n )\n )\n return dictionary", "def add(self, key, val):\n self.obtain(key).append(val)", "def _update_key_set(self):\n self._key_set = set([item.keyword for item in self._metadata])", "def set_key_in_dict_HELPER(data_dict, key_list, value_to_add):\n data_dict = get_key_from_dict_HELPER(data_dict, key_list[:-1])\n data_dict[key_list[-1]] = value_to_add", "def add_lists(self, key, value, pos):\n if pos == 'r':\n return self.redis.rpush(key, value)\n else:\n return self.redis.lpush(key, value)", "def _set_neighs_list_list_list(self, key):\n self.ks = list(range(len(key))) if self.ks is None else self.ks\n if self._constant_neighs:\n self.idxs = np.array(key)\n else:\n self.idxs = key\n if len(self.idxs[0]) != len(self.iss):\n self.iss = list(range(len(self.idxs[0])))\n if self.staticneighs:\n self.idxs = self.idxs[0]\n self._setted = True", "def put_keys(set_name, keys, value, send_key):\n\tfor key in keys:\n\t\tlib.write_record(set_name, key, [\"value\"], [value], send_key)", "def __setitem__(self, key, value):\n list.__setitem__(self, key, self.convertNode(value))", "def set(self, key, value):\r\n self.set_many({key: value})", "def set_many(self, keys, values, expire=Ellipsis):\n if expire is Ellipsis:\n expire = self.rope.config.expire_default\n return self.set_many_values(keys, [self.rope.encode(v) for v in values], expire)", "def multi_set(self, items, no_update_log=False):\n opts = 
(no_update_log and TyrantProtocol.RDBMONOULOG or 0)\n lst = []\n for k, v in items.iteritems():\n if isinstance(v, (dict)):\n new_v = []\n for kk, vv in v.items():\n new_v.append(kk)\n new_v.append(vv)\n v = new_v\n if isinstance(v, (list, tuple)):\n assert self.separator, \"Separator is not set\"\n\n v = self.separator.join(v)\n lst.extend((k, v))\n\n wait(self.proto.misc(\"putlist\", lst, opts))", "def __setitem__(self, key, value):\n if isinstance(key, list):\n value = _ensure_len(len(key), value)\n for k, v in zip(key, value):\n defaultdict.__setitem__(self, k, v)\n else:\n defaultdict.__setitem__(self, key, value)\n return self", "def __setitem__(self, key, value):\n self.list[key] = value", "def __setitem__(self, key, val):\n self.set[key] = val", "def _set_neighs_list_list(self, key):\n if self._constant_neighs:\n key = np.array(key)\n if self.staticneighs:\n self.idxs = key\n self.ks = range(1) if self.ks is None else self.ks\n else:\n self.ks = range(1) if self.ks is None else self.ks\n len_ks = len(self.ks)\n self.idxs = [key for k in range(len_ks)]\n if type(key) == np.ndarray:\n self.idxs = np.array(self.idxs)\n if len(self.iss) != len(key):\n if len(self.iss) != len(key):\n self.iss = range(len(key))\n# if len(self.idxs[0]) > 0:\n# self.iss = list(range(len(self.idxs)))\n self._setted = True", "def add(self, key, val):\n key_lower = key.lower()\n new_vals = key, val\n # Keep the common case aka no item present as fast as possible\n vals = _dict_setdefault(self, key_lower, new_vals)\n if new_vals is not vals:\n # new_vals was not inserted, as there was a previous one\n if isinstance(vals, list):\n # If already several items got inserted, we have a list\n vals.append(val)\n else:\n # vals should be a tuple then, i.e. only one item so far\n # Need to convert the tuple to list for further extension\n _dict_setitem(self, key_lower, [vals[0], vals[1], val])", "def update_dict(dictionary, key, value):\n if key in dictionary:\n dictionary[key].append(value)\n else:\n dictionary[key] = [value]\n return dictionary" ]
[ "0.68404776", "0.64850795", "0.6393843", "0.6393843", "0.6334041", "0.63275087", "0.6321269", "0.61832106", "0.60738695", "0.60602796", "0.60555", "0.6010969", "0.59901476", "0.591847", "0.58933073", "0.58578247", "0.58196485", "0.58180434", "0.5807921", "0.57934815", "0.5766367", "0.57211536", "0.5703659", "0.5679214", "0.5666535", "0.56645435", "0.5660936", "0.5659584", "0.563052", "0.5621893" ]
0.65909123
1
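A minimal, self-contained sketch of the merge rule that append() in the record above applies to its local storage: values are JSON-coded lists and appending takes the set union, so duplicates collapse; anything that cannot be treated as a list (a single value, or None after a delete) is simply replaced. The function and variable names are illustrative.

import json

def merge_append(stored_json, appended_json):
    appended = json.loads(appended_json)
    if stored_json is None:
        return json.dumps(appended)      # key not seen before
    try:
        stored = json.loads(stored_json)
        merged = list(set(stored + appended))
    except (TypeError, ValueError):
        merged = appended                # fall back to replacing the old value
    return json.dumps(merged)

print(merge_append('["a", "b"]', '["b", "c"]'))   # union, e.g. ["a", "b", "c"]
print(merge_append('null', '["x"]'))              # ["x"]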
Set the given key to the given value in the network.
def set(self, key, value): _log.debug("setting '%s' = '%s' on network" % (key, value)) dkey = digest(key) node = Node(dkey) def store(nodes): _log.debug("setting '%s' to %s on %s" % (key, value, map(str, nodes))) # if this node is close too, then store here as well if (not nodes or self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]) or dkey in self.storage): _log.debug("setting '%s' to %s locally" % (key, value)) self.storage[dkey] = value ds = [self.protocol.callStore(n, dkey, value) for n in nodes] return defer.DeferredList(ds).addCallback(self._anyRespondSuccess) nearest = self.protocol.router.findNeighbors(node) if len(nearest) == 0: _log.warning("There are no known neighbors to set key %s" % key) return defer.succeed(False) spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha) return spider.find().addCallback(store)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set(self, key, value):\n self.log.debug(\"setting '%s' = '%s' on network\" % (key, value))\n dkey = digest(key)\n node = Node(dkey)\n\n def store(nodes):\n self.log.info(\"setting '%s' on %s\" % (key, list(map(str, nodes))))\n # if this node is close too, then store here as well\n if self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):\n self.storage[dkey] = value\n ds = [self.protocol.callStore(n, dkey, value) for n in nodes]\n d = defer.DeferredList(ds)\n d.addCallback(self._anyRespondSuccess)\n d.addErrback(self.onError)\n return d\n\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n self.log.warning(\"There are no known neighbors to set key %s\" % key)\n return defer.succeed(False)\n spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n d = spider.find()\n d.addCallback(store)\n d.addErrback(self.onError)\n return d", "def set(self, key, value):\n self._data[key] = value", "def set(self, key, value):\n self._data[key] = value", "def set(self, key, value, cb=None):\n _log.analyze(self.node.id, \"+ CLIENT\", {'key': key, 'value': value})\n self.send(cmd='SET',msg={'key':key, 'value': value}, cb=cb)", "def set(self, key, value):", "def set(self, key, value):", "def setnx(self, key, value):\n return self.set(key, value, nx=True)", "def set(self, key, value):\r\n self.set_many({key: value})", "def set(self, key, value):\n raise NotImplementedError", "def set_value(self, key, value):\n self.data[key] = value\n self.save_data()", "def set(self, key, value):\n self.remove(key)\n self.add(key, value)", "async def set(self, key, value):\n trace_log(\"PersistantStorage: setting key \", key, \" to value \", value)\n self.dict[key] = value\n #self.log_set(key, value)", "def set(self, key, value):\n self.data[key] = value\n logger.debug('Setting value \"%s\" for variable \"%s\"', value, key)", "def set(self, key, value):\n return self.redis_handler.set(key, value)", "def set(self, key: T, value: U) -> None:\n self._store[key] = value", "def _set(self, key, value):\n self._data[key] = value\n return self._data[key]", "def set(self, key, value):\n self._attributes[key] = value", "def __setitem__(self, key, value):\n self.set(key, value)", "def __setitem__(self, key, value):\n self.set(key, value)", "def __setitem__(self, key, value):\n self.set(key, value)", "def set(self, key, value):\n self.context.set(self.prefix+'.'+key, value)", "def set_value(self, key, value):\n self._version[key] = value", "def set_to_redis(self, key: str, value):\n self.redis_client.hset(self.root_path, key, value)", "def set_value(self, key: keyType, new_value: valueType) -> None:\n self.validate(key, new_value)\n head_node_index, chain_node_index = self.exist_key(key)\n # \"head_node_index is equal to -1\" means that 'key' doesn't exist in dictionary object.\n if head_node_index == -1:\n self.add(key, new_value)\n else:\n self.hashTable[head_node_index].singlyLinkedList[chain_node_index].values = [new_value]", "def set(self, key, value):\n hk = hash(key)\n h = self._hash(hk)\n i = 0\n while i < self.size:\n if self.slot[h] == None:\n self.slot[h] = key\n self.data[h] = value\n break\n i += 1\n h = self._rehash(hk, i)", "def __setitem__(self, key, val):\n self.__check_key_validity(key)\n self.data[key[0]][key[1]] = val", "def set(self, key, value):\n #try to lock the tree. 
If we succeed make sure\n #we dont lose updates from any other process\n if self._storage.lock():\n self._refresh_tree_ref()\n #get current top-level node and make a value-ref\n node = self._follow(self._tree_ref)\n value_ref = ValueRef(value)\n #insert and get new tree ref\n self._tree_ref = self._insert(node, key, value_ref)\n self._tree_ref = self._blacken(self._follow(self._tree_ref))", "def set(self, key, value):\n db = self._open()\n try:\n db[self.__ck(key)] = value\n finally:\n db.close()", "def set(self, key, value):\n self.db_dict.setdefault(self.actual_key(key), {})[key.field_name] = value", "def set(self, key, value):\r\n if not isinstance(key, str):\r\n raise TypeError(\"Key must be a string\")\r\n\r\n bin_num = self._get_bin(key)\r\n cur = self.bins[bin_num]\r\n\r\n if cur.next is None: # If first element is trailer node, insert here\r\n self._ensure_load()\r\n self.bins[bin_num] = Node((key, value), cur)\r\n success = True\r\n elif cur.value[0] == key: # If existing key, overwrite\r\n cur.value = (cur.value[0], value)\r\n success = False\r\n else: # Move towards end of linked list\r\n while cur.next is not None:\r\n prev = cur\r\n cur = cur.next\r\n self._ensure_load()\r\n prev.next = Node((key, value), cur)\r\n success = True\r\n\r\n if success:\r\n self.size += 1\r\n return success" ]
[ "0.8524701", "0.75702727", "0.75702727", "0.75380826", "0.7519836", "0.7519836", "0.74716425", "0.73170406", "0.73001647", "0.72439593", "0.71774316", "0.715227", "0.71102387", "0.7076738", "0.70714426", "0.70238066", "0.70225656", "0.69618803", "0.69618803", "0.69618803", "0.6960543", "0.69502884", "0.694307", "0.6930661", "0.69151086", "0.69031906", "0.6900333", "0.6891046", "0.6878402", "0.6872567" ]
0.8498977
1
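set() in the record above treats the write as successful if at least one contacted replica stored the value, which is what the _anyRespondSuccess callback checks over the DeferredList of callStore() results. The aggregation sketched here is an assumption based on that callback's name and use, not the original implementation.

from twisted.internet import defer

def any_respond_success(results):
    # DeferredList hands back (success, value) pairs, one per contacted peer.
    return any(ok and value for ok, value in results)

ds = [defer.succeed(True), defer.succeed(False),
      defer.fail(RuntimeError("timeout"))]
d = defer.DeferredList(ds, consumeErrors=True)
d.addCallback(any_respond_success)
d.addCallback(print)                     # True: at least one peer stored the key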
For the given key, remove the given list of values from the set in the network.
def remove(self, key, value): dkey = digest(key) node = Node(dkey) _log.debug("Server:remove %s" % base64.b64encode(dkey)) def remove_(nodes): # if this node is close too, then store here as well if not nodes or self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]): try: pvalue = json.loads(value) self.set_keys.add(dkey) if dkey in self.storage: try: old_value = json.loads(self.storage[dkey]) new_value = list(set(old_value) - set(pvalue)) except: # When the key have been used for single values or deleted it does not contain a list # Just empty it old_value = self.storage[dkey] new_value = [] self.storage[dkey] = json.dumps(new_value) _log.debug("%s local remove key: %s old: %s remove: %s new: %s" % (base64.b64encode(node.id), base64.b64encode(dkey), old_value, pvalue, new_value)) except: _log.debug("Trying to remove somthing not a JSON coded list %s" % value, exc_info=True) ds = [self.protocol.callRemove(n, dkey, value) for n in nodes] return defer.DeferredList(ds).addCallback(self._anyRespondSuccess) nearest = self.protocol.router.findNeighbors(node) if len(nearest) == 0: self.log.warning("There are no known neighbors to set key %s" % key) return defer.succeed(False) spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha) return spider.find().addCallback(remove_)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_values_from_list(self,list_,*values):\r\n for value in values:\r\n while value in list_:\r\n list_.remove(value)", "def remove(self, key):", "def remove(self, key: int) -> None:\n \n index = self.hash(key)\n\n # If list doesn't exist just return\n if not self.map[index]: return\n\n # If it's the head of list, manipulate pointers\n if self.map[index].val[0] == key:\n self.map[index] = self.map[index].next\n return\n\n curr = self.map[index]\n\n # Search through list\n while curr.next:\n # If the value in list matches key, manipulate list\n if curr.next.val[0] == key: \n curr.next = curr.next.next\n return\n\n curr = curr.next\n\n # Otherwise if it's not in list do nothing", "def remove(self, key: int) -> None:\n t = key % 20011\n delete = []\n for item in self.hash[t]:\n if item[0] == key:\n delete = item\n if delete:\n self.hash[t].remove(delete)", "def remove(self, key):\r\n for i in range(len(self.lis)):\r\n if self.lis[i][0] == key:\r\n self.lis.pop(i)\r\n break", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def remove(self, key: int) -> None:\n if key in self.keys:\n idx = self.keys.index(key)\n self.keys.pop(idx)\n self.values.pop(idx)", "def remove(self, key):\n h = key%self.m\n a = self.a\n if a[h]:\n a[h] = None", "def remove_by_keys(self, keys):\n return list(filter(lambda item: item.keyword not in set(keys), self._metadata))", "def delete_many(self, keys):\n return self.delete_many_values(keys)", "def remove(self, key):\n pass", "def remove_subset_from_set(metaobject, subset_key):\n # If it is not a list, check if subset key in the dictionary and just remove that key\n if not isinstance(metaobject, list):\n if subset_key in metaobject:\n del metaobject[subset_key]\n else:\n for obj in metaobject:\n # Iterate over the list and remove the key from each object if it is there\n if subset_key in obj:\n del obj[subset_key]\n\n return metaobject", "def remove(self, key):\n ha = self.myhash(key)\n if key in self.hashmap[ha][0]:\n i = self.hashmap[ha][0].index(key)\n self.hashmap[ha][0].pop(i)\n self.hashmap[ha][1].pop(i)", "def rem(self, keys: Union[str, Iterable]):\n return(self.db.delVal(db=self.sdb, key=self._tokey(keys)))", "def rem(self, keys: Union[str, Iterable]):\n return(self.db.delVal(db=self.sdb, key=self._tokey(keys)))", "def rem(self, keys: Union[str, Iterable]):\n return(self.db.delVal(db=self.sdb, key=self._tokey(keys)))", "def remove_from_values(values, to_remove):\n to_keep = []\n for x in to_remove:\n if '!' 
in x:\n to_keep.append(x.replace(\"!\", \"\"))\n\n if len(to_keep) == 0:\n for x in to_remove:\n del values[x]\n else:\n tmp_values = values.copy()\n for key in tmp_values.keys():\n if key not in to_keep:\n del values[key]", "def __delitem__(self, key: Union[Hashable, Sequence[Hashable]]) -> None:\n self.contents = {i: self.contents[i] for i in self.contents \n if i not in more_itertools.always_iterable(key)}\n return", "def remove_states(self, keys: list):\n if self.spec.graph:\n self.spec.graph.clear_children(keys)", "def remove(self, key: int | str):\n self.__delitem__(key)", "def remove(self, key):\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] -= 1\n\t\tself.n -= 1", "def remove_from_multidict(d: MultiDict, key: str, item: typing.Any):\n # works by popping all, removing, then re-adding into\n i = d.popall(key, [])\n if item in i:\n i.remove(item)\n\n for n in i:\n d.add(key, n)\n\n return d", "def remove_elements_from_set(s: set, *args) -> set:\n for _ in args:\n s.remove(_)\n return s", "def delete_many(self, keys):\n raise NotImplementedError()", "def remove_keys(data: dict, keys: list[str]) -> None:\n for k in keys:\n _ = data.pop(k, None)", "def remove(self, data, key, value):\n if key in data:\n if not value: # value is empty or false, just remove it\n data.pop(key, None) # delete\n elif isinstance(value, type(data[key])): # if same type\n if isinstance(value, list): # if it's a list, like modules\n data[key] = list(set(data[key]) - set(value))\n elif isinstance(\n value, dict\n ): # if it's a dict, difference of the keys and rebuild dict\n for k, v in value.items():\n data[key][k] = self.remove(data[key], k, v)\n else:\n raise TypeError(\n f\"Value of {key} is {type(value)} and\"\n f\" the imported {key} is {type(data[key])}. Type mismatch.\"\n )\n return data[key]", "def remove(self, key):\n match = self.find(key)\n if not match:\n raise UserDBValueError(\"Element not found in list\")\n\n self._elements = [this for this in self._elements if this != match]\n return self", "def remove(self, key):\n if self.head is None:\n print('Cannot remove from empty list!')\n return\n if self.head.data == key:\n self.head = self.head.next\n return\n\n itr = self.head\n prev = ListNode()\n while itr:\n curr = itr\n if itr.data == key:\n prev.next = curr.next\n return\n prev = curr\n itr = itr.next", "def remove(self,v):\n if isinstance(v,list): # list\n map(self.remove,v)\n else:\n if v in self:\n del self[v]", "def delDoublon(values):\n\treturn list(set(values))" ]
[ "0.7044609", "0.6552202", "0.6546256", "0.65116614", "0.64858824", "0.64747924", "0.6387488", "0.63731134", "0.63666767", "0.6270694", "0.6248914", "0.6236957", "0.62207824", "0.621597", "0.621597", "0.621597", "0.62116325", "0.6190252", "0.6188199", "0.6174154", "0.61655444", "0.6146638", "0.61258495", "0.6100552", "0.6086575", "0.6079161", "0.60780716", "0.6060451", "0.60581", "0.6046066" ]
0.66019624
1
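A companion sketch to the append example: the local rule remove() in the record above applies is a set difference over the JSON-coded list, and a stored value that cannot be treated as a list (a single value, or None after a delete) is emptied. Names are illustrative, not from the original module.

import json

def merge_remove(stored_json, removed_json):
    removed = json.loads(removed_json)
    try:
        stored = json.loads(stored_json)
        remaining = list(set(stored) - set(removed))
    except (TypeError, ValueError):
        remaining = []                   # single value or deleted key: empty it
    return json.dumps(remaining)

print(merge_remove('["a", "b", "c"]', '["b"]'))   # e.g. ["a", "c"]
print(merge_remove('null', '["x"]'))              # []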
We got some values! Exciting. But let's combine them all. Also, make sure we tell the nearest node that didn't have the value to store it.
def _handleFoundValues(self, jvalues): # TODO figure out if we could be more cleaver in what values are combined value = None _set_op = True if self.local_value: jvalues.append((None, self.local_value)) _log.debug("_handleFoundValues %s" % str(jvalues)) # Filter out deleted values jvalues = [v for v in jvalues if v[1] is not None] if len(jvalues) > 1: args = (self.node.long_id, str(jvalues)) _log.debug("Got multiple values for key %i: %s" % args) try: values = [(v[0], json.loads(v[1])) for v in jvalues] value_all = [] for v in values: value_all = value_all + v[1] value = json.dumps(list(set(value_all))) except: # Not JSON coded or list, probably trying to do a get_concat on none set-op data # Do the normal thing _log.debug("_handleFoundValues ********", exc_info=True) valueCounts = Counter([v[1] for v in jvalues]) value = valueCounts.most_common(1)[0][0] _set_op = False else: try: key, value = jvalues[0] except: value = "[]" # JSON empty list peerToSaveTo = self.nearestWithoutValue.popleft() if peerToSaveTo is not None: _log.debug("nearestWithoutValue %d" % (len(self.nearestWithoutValue)+1)) if _set_op: d = self.protocol.callAppend(peerToSaveTo, self.node.id, value) else: d = self.protocol.callStore(peerToSaveTo, self.node.id, value) return d.addCallback(lambda _: value) # TODO if nearest does not contain the proper set push to it return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val", "def eliminate(values):\n for b in boxes:\n if len(values[b]) == 1:\n for p in peers[b]:\n values = assign_value(values, p, values[p].replace(values[b], ''))\n return values", "def _nodesFound(self, responses):\n toremove = []\n foundValues = []\n for peerid, response in responses.items():\n response = RPCFindResponse(response)\n if not response.happened():\n toremove.append(peerid)\n elif response.hasValue():\n foundValues.append((peerid, response.getValue()))\n else:\n peer = self.nearest.getNodeById(peerid)\n self.nearestWithoutValue.push(peer)\n self.nearest.push(response.getNodeList())\n _log.debug(\"_nodesFound nearestWithoutValue: %s, nearest: %s, toremove: %s\" %\n (self.nearestWithoutValue.getIDs(), self.nearest.getIDs(), toremove))\n self.nearest.remove(toremove)\n\n if len(foundValues) > 0:\n return self._handleFoundValues(foundValues)\n if self.nearest.allBeenContacted():\n # not found at neighbours!\n if self.local_value:\n # but we had it\n return self.local_value\n else:\n return None\n return self.find()", "def get_connective_values(self):\n values = [[self.truth_value, self.number]]\n if len(self.children) == 1:\n values.extend(self.children[0].get_connective_values())\n elif len(self.children) == 2:\n child = self.children[0].get_connective_values()\n child.extend(values)\n child.extend(self.children[1].get_connective_values())\n values = child\n return values", "def merge_nodes(self):\n\n\t\t\t#obtenemos los dos primeros nodos que equivalen a quienes tienen menor frecuencia\n\t\t\twhile(len(self.heap)>1):\n\t\t\t\tnode1 = heapq.heappop(self.heap)\n\t\t\t\tnode2 = heapq.heappop(self.heap)\n\n\t\t\t\tmerged = self.HeapNode(None, node1.freq + node2.freq)#creamos un nodo padre que va a contener los nodos anteriores a la derecha y izquierda\n\t\t\t\tmerged.left = node1\n\t\t\t\tmerged.right = node2\n\n\t\t\t\theapq.heappush(self.heap, merged)#agregamos este nodo al priority queue", "def __get_results(node):\n if node.results is not None: #Check if the node is a leaf.\n return node.results #If the node is a leaf, then just return its results\n else: #If the node is not a leaf, recursively combine the results from its branches.\n tbr = __get_results(node.tb) #get the results from the true branch\n fbr = __get_results(node.fb) #get the results from the false branch\n \n new_results = deepcopy(tbr) #make a deep copy of the results from the true branch\n for key in fbr: #Iterate through the keys in the false branch\n if key not in new_results: #Check whether or not the key is in the deep copy of the true branch\n new_results[key] = fbr[key] #If it is not, add the key to the deep copy, and make its value equal to the value in fbr\n else: #If the key is in the deep copy of the true branch, add the false branch's count to the value in the deep copy\n new_results[key] = fbr[key] + new_results[key]\n return new_results #return the merged results from the false and true branches", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n # values[peer] = values[peer].replace(digit, '')\n new_value = values[peer].replace(digit, '')\n assign_value(values, peer, new_value)\n return values", "def eliminate(values):\n complete_boxes = [box for box in values.keys() if len(values[box])==1]\n for box in complete_boxes:\n for peer in 
peers[box]:\n values = assign_value(values, peer, values[peer].replace(values[box], \"\"))\n \n return values", "def update(self, tree: \"Tree\") -> List[ValueObject]:\n new_values = set([])\n not_matched = set([])\n to_delete = set([])\n # Trees are lazy and need to be initialized before use.\n self.init()\n tree.init()\n # self.tree doesn't have labels -> there are no labels to query.\n if not self.tree and tree.vos:\n del self.vos[:]\n not_matched = range(len(tree.vos))\n else:\n # search_hits saves the intersection of all label matches.\n # The indices in the sets at the end are the search hits.\n search_hits = {ix: set([]) for ix in range(len(tree.vos))}\n for label in self.label_grid:\n if label in (\"_auto\",):\n continue\n if label in tree.tree and label in self.tree:\n # All label values that exist in both trees.\n for label_value in (\n tree.tree[label].keys() & self.tree[label].keys()\n ):\n for new_ix in tree.tree[label][label_value]:\n if new_ix in search_hits:\n if search_hits[new_ix]:\n search_hits[new_ix] &= self.tree[label][\n label_value\n ]\n else:\n search_hits[new_ix] |= self.tree[label][\n label_value\n ]\n # All label values in the new tree that are not in this tree.\n # Value objects that have a label value that is not included\n # in the current tree means that they will not be matched.\n for label_value in (\n tree.tree[label].keys() - self.tree[label].keys()\n ):\n for new_ix in tree.tree[label][label_value]:\n search_hits.pop(new_ix)\n not_matched.add(new_ix)\n elif label in self.tree:\n # All value objects with labels not specified in the other\n # tree are treated as search hits (for this label).\n unused_label = set.union(*self.tree[label].values())\n for new_ix in search_hits:\n if search_hits[new_ix]:\n search_hits[new_ix] &= unused_label\n else:\n search_hits[new_ix] |= unused_label\n elif label in tree.tree:\n raise ParamToolsError(\n f\"Label {label} was not defined in the defaults.\"\n )\n\n for ix, search_hit_ixs in search_hits.items():\n if search_hit_ixs:\n if tree.vos[ix][\"value\"] is not None:\n for search_hit_ix in search_hit_ixs:\n self.vos[search_hit_ix][\"value\"] = tree.vos[ix][\n \"value\"\n ]\n else:\n to_delete |= search_hit_ixs\n else:\n not_matched.add(ix)\n if to_delete:\n # Iterate in reverse so that indices point to the correct\n # value. 
If iterating ascending then the values will be shifted\n # towards the front of the list as items are removed.\n for ix in sorted(to_delete, reverse=True):\n del self.vos[ix]\n\n if not_matched:\n for ix in not_matched:\n if tree.vos[ix][\"value\"] is not None:\n self.vos.append(tree.vos[ix])\n new_values.add(len(self.vos) - 1)\n\n # It's faster to just re-build from scratch if values are deleted.\n if to_delete:\n self.new_values = None\n self.needs_build = True\n else:\n self.new_values = new_values\n self.needs_build = True\n\n return self.vos", "def _compute_best_value(self):\n asgt = self._neighbors_values.copy()\n best_cost, best_val = None, []\n\n for v in self._variable.domain:\n asgt[self.variable.name] = v\n c = self._compute_cost(**asgt)\n if (\n best_cost is None\n or (best_cost > c and self._mode == \"min\")\n or (best_cost < c and self._mode == \"max\")\n ):\n best_cost = c\n best_val = [v]\n elif best_cost == c:\n best_val.append(v)\n\n return best_val, best_cost", "def calculate(self) :\n #self.graph\n self.val_i = 0\n while (len(self.undefined_vertices) > 0) :\n cond = True\n while cond:\n cond = False\n for node in self.undefined_vertices:\n if self.condition_satisfied(node, self.val_i) :\n cond = True\n self.undefined_vertices.remove(node)\n self.val_l[node] = self.val_i\n self.val_c[node] = self.val_m\n self.val_m += 1\n for node in self.undefined_vertices:\n if self.val_i not in [self.val_l[n] for n in\n self.graph.next_nodes(node)]:\n self.undefined_vertices.remove(node)\n self.val_l[node] = self.infinity\n self.val_c[node] = 0\n self.val_i += 1\n self.graph.save_graph(\"check\")\n self.save(\"this.value\")\n return self.graph.to_dictionary(), self.val_l, self.val_c", "def eliminate(values):\n\tsolved = [box for box in boxes if len(values[box]) == 1]\n\tempties = [box for box in boxes if len(values[box]) == 0]\n\n\tfor empty in empties:\n\t\tvalues[empty] = '123456789'\n\n\tfor box in solved:\n\n\t\tfor peer in peers[box]:\n\t\t\tvalues = assign_value(values, peer, values[peer].replace(values[box], ''))\n\n\treturn values", "def value(self,value):\n if math.isnan(value):\n return\n self.__append(value)", "def merge_all(self, values):\n return reduce(self.merge, values, self.extremum)", "def merge_duplicate_nodes(self):\n merges={}\n xys={}\n for n in self.valid_node_iter():\n k=tuple(self.nodes['x'][n])\n if k in xys:\n merges[n]=xys[k]\n self.merge_nodes(xys[k],n)\n else:\n xys[k]=n\n return merges", "def get_existing_values(self): #DONE\n return (value.value for value in self.address.values() if value.value)", "def graph_traversal_sum(values_in, connections_in, nodes_start, nodes_end):\n # Make sure that original objects are not changed\n values = values_in.copy()\n connections = connections_in.copy()\n\n # End node\n node_end = next(iter(nodes_end))\n\n # Function to calculate the path from a given sink to the source \n def sink_path(connections_in, node_start, nodes_start, node_end, path=None, last_junction=None):\n # List with the nodes that make the path from node_start until node_end\n path = path if path else [node_start]\n # Size of the connections matrix\n nodes_length = len(connections_in[node_start])\n # List of nodes connected to the current node that are not yet in the calculated path and are not sinks\n next_node = [x for x in range(0, nodes_length) if connections_in[path[-1], x] == 1 and x not in nodes_start and x not in path]\n # Last node in the path that is a junction (intersection of more than 2 nodes)\n if len(next_node) > 1:\n last_junction = 
path[-1]\n # Iterate over all possible connections from the current node\n for node in next_node:\n if node not in path:\n # Add (temporarily) the next possible node in the path\n path.append(node)\n if node == node_end:\n # Source, end of path\n return path\n # Calculate path again from current node\n path = sink_path(connections_in, node_start, nodes_start, node_end, path, last_junction)\n # The next possible node is a sink, this path is not valid, remove all nodes in the path from the last junction\n if not next_node:\n index = len(path) - 1\n if last_junction is not None:\n index = path.index(last_junction) + 1\n path = path[0:index]\n return path\n\n # Calculate all paths from sinks to the source\n paths = []\n for node in nodes_start:\n paths.append(sink_path(connections, node, nodes_start, node_end))\n\n # Function to sum up the node values of all paths in the graph\n def path_sum(connections_in, paths, values_in):\n # Size of the connections matrix\n nodes_length = len(connections_in[0])\n # Matrix with the output graph traversal sum\n connections_out = np.zeros(shape=(nodes_length, nodes_length))\n # Iterate over all elements in the matrix and add the initial values for every path for every node\n for x in range(0, nodes_length):\n for y in range(0, nodes_length):\n # If the given element of connections_in is not zero, is a node of the graph\n if connections_in[x, y] > 0:\n # Iterate over all paths\n for path_index in range(0, len(paths)):\n path = paths[path_index]\n for i in range(0, len(path) - 2):\n # Check if the current element of the matrix is part of the current path\n if path[i] == x and path[i + 1] == y:\n # Add the value coming from the corresponding path\n connections_out[x, y] += values_in[x, y]\n # The matrix is symmetric, add the transpose of the calculated matrix\n connections_out = np.maximum( connections_out, connections_out.transpose())\n return connections_out\n\n return path_sum(connections, paths, values)", "def set(self, node_index, value):\n if value < 0.0:\n raise ValueError(\n 'Sum tree values should be nonnegative. 
Got {}'.format(value))\n self.highest_set = max(node_index, self.highest_set)\n node_index = node_index + self.low_idx\n self.max_recorded_priority = max(value, self.max_recorded_priority)\n\n delta_value = value - self.nodes[node_index]\n\n # Now traverse back the tree, adjusting all sums along the way.\n for _ in reversed(range(self.depth)):\n # Note: Adding a delta leads to some tolerable numerical inaccuracies.\n self.nodes[node_index] += delta_value\n node_index = (node_index - 1) // 2\n\n self.nodes[node_index] += delta_value\n assert node_index == 0, ('Sum tree traversal failed, final node index '\n 'is not 0.')", "def _merge_results(self, res_to_node, res_to_end):\n path_to_node = res_to_node['path']\n path_to_end = res_to_end['path']\n\n # Contains distances from start to other nodes\n dist_from_start = res_to_node['dist']\n # Contains distances from end to other nodes\n dist_from_end = res_to_end['dist']\n\n to_node_contributions = res_to_node['contributions']\n to_end_contributions = res_to_end['contributions']\n\n # Remove any shared nodes from the concatenated path\n shared_node = None\n i = 0\n full_path_ele_gain = res_to_node['ele_gain'] + res_to_end['ele_gain']\n while(i < len(path_to_end) and len(path_to_node) > 0\n and path_to_node[-1] == path_to_end[i]):\n shared_node = path_to_node[-1]\n path_to_node.pop(-1)\n full_path_ele_gain -= to_node_contributions[shared_node]\n full_path_ele_gain -= to_end_contributions[shared_node]\n i += 1\n # Replace the final shared node (midpoint if the paths share no other nodes)\n path_to_node.append(shared_node)\n full_path_ele_gain += to_node_contributions[shared_node]\n full_path = path_to_node + path_to_end[i:]\n full_path_len = dist_from_start[shared_node] + dist_from_end[shared_node]\n\n return SearchResult(\n path=full_path,\n path_len= full_path_len,\n ele_gain=full_path_ele_gain\n )", "def resolve_values_on_links(grid, link_values):\n return (\n np.multiply(\n (\n (\n grid.node_x[grid.node_at_link_head]\n - grid.node_x[grid.node_at_link_tail]\n )\n / grid.length_of_link\n ),\n link_values,\n ),\n np.multiply(\n (\n (\n grid.node_y[grid.node_at_link_head]\n - grid.node_y[grid.node_at_link_tail]\n )\n / grid.length_of_link\n ),\n link_values,\n ),\n )", "def merge(self):\n for i in range(len(self.main_grid_values)):\n for j in range(len(self.main_grid_values) - 1):\n if self.main_grid_values[i][j] == self.main_grid_values[i][j+1]:\n self.score_value.set(str(int(self.score_value.get()) + self.main_grid_values[i][j]*2))\n self.main_grid_values[i][j] *= 2\n self.main_grid_values[i][j+1] = 0", "def nearest(self, value):\n coords = value[:2] # value only has 2 coords (x, y) right now, but it may have theta in the future\n hits = self.idx.nearest(self.make_bounding_box(coords), 1, objects=False)\n for hit in hits:\n # take the first index in the event of any ties\n return self.nodes[hit]\n \n \n \n #assert that value is valid here\n \"\"\"def recur(node, depth=0):\n closest, distance = node, self.cost(node.value, value)\n if depth < self.max_size:\n for child in node.children:\n (child_closest, child_distance) = recur(child, depth+1)\n if child_distance < distance:\n closest = child_closest\n distance = child_distance \n return closest, distance\n return recur(self.root)[0]\"\"\"", "def _process_data(self, values, edges):\n values = np.array(values)\n edges = np.array(edges, dtype=np.float)\n if len(edges) == len(values):\n widths = list(set(np.diff(edges)))\n if len(widths) == 1:\n width = widths[0]\n else:\n raise Exception('Centered 
bins have to be of equal width.')\n edges -= width/2.\n edges = np.concatenate([edges, [edges[-1]+width]])\n return values, edges", "def update_attr(self):\n\n # Retrieve all current values\n all_values = nx.get_node_attributes(self.G, 'value')\n\n new_values = {}\n\n # Loop over all nodes\n for i in range(self.n_v):\n\n # Obtain list of neighbors\n neighbors = list(nx.all_neighbors(self.G, i))\n\n # Compute part dependent on own node\n val_i = all_values[i]\n new_value = (1 - self.eps) * (1 - self.a * val_i * val_i)\n\n # Compute part dependent on neighbor nodes\n neighbors_value = 0\n for neighbor in neighbors:\n val_n = all_values[neighbor]\n neighbors_value += (1 - self.a * val_n * val_n)\n\n # Catch nodes without neighbors\n try:\n new_value += neighbors_value * (self.eps/len(neighbors))\n except ZeroDivisionError:\n pass\n\n # Save new value\n new_values[i] = {'value': new_value}\n\n nx.set_node_attributes(self.G, new_values)", "def _append_value(self, v_values, next_value, v_idx=None, n_vals=1):\n for _ in range(n_vals):\n if v_idx:\n try:\n v_i = next(v_idx)\n except StopIteration:\n # Repeating commas are null-statements and can be ignored\n # Otherwise, we warn the user that this is a bad namelist\n if next_value is not None:\n warnings.warn(\n 'f90nml: warning: Value {v} is not assigned to '\n 'any variable and has been removed.'\n ''.format(v=next_value)\n )\n\n # There are more values than indices, so we stop here\n break\n\n v_s = [self.default_start_index if idx is None else idx\n for idx in v_idx.first]\n\n if not self.row_major:\n v_i = v_i[::-1]\n v_s = v_s[::-1]\n\n # Multidimensional arrays\n if not self.sparse_arrays:\n pad_array(v_values, list(zip(v_i, v_s)))\n\n # We iterate inside the v_values and inspect successively\n # deeper lists within the list tree. 
If the requested index is\n # missing, we re-size that particular entry.\n # (NOTE: This is unnecessary when sparse_arrays is disabled.)\n\n v_subval = v_values\n for (i_v, i_s) in zip(v_i[:-1], v_s[:-1]):\n try:\n v_subval = v_subval[i_v - i_s]\n except IndexError:\n size = len(v_subval)\n v_subval.extend([] for _ in range(size, i_v - i_s + 1))\n v_subval = v_subval[i_v - i_s]\n\n # On the deepest level, we explicitly assign the value\n i_v, i_s = v_i[-1], v_s[-1]\n try:\n v_subval[i_v - i_s] = next_value\n except IndexError:\n size = len(v_subval)\n v_subval.extend(None for _ in range(size, i_v - i_s + 1))\n v_subval[i_v - i_s] = next_value\n else:\n v_values.append(next_value)", "def __fillCoordinatesFromSource(self):\n self.xValues = []\n if self.yCoordinates:\n self.yValues = []\n if self.zCoordinates:\n self.zValues = []\n if self.clusterLabels:\n self.clusterValues = []\n if self.mixtureLabels:\n self.mixtureValues = []\n\n # initial setup for x,y,z Values, clusterValues, mixtureValues, and colorMapValues\n for pltIndex in range(len(self.outStreamTypes)):\n self.xValues.append(defaultdict(list))\n if self.yCoordinates:\n self.yValues.append(defaultdict(list))\n if self.zCoordinates:\n self.zValues.append(defaultdict(list))\n if self.clusterLabels:\n self.clusterValues.append(defaultdict(list))\n if self.mixtureLabels:\n self.mixtureValues.append(defaultdict(list))\n if self.colorMapCoordinates[pltIndex] is not None:\n self.colorMapValues[pltIndex] = defaultdict(list)\n\n # fill x,y,z Values, clusterValues, mixtureValues, and colorMapValues\n for pltIndex in range(len(self.outStreamTypes)):\n if len(self.sourceData[pltIndex]) == 0:\n return False\n dataSet = self.sourceData[pltIndex].asDataset()\n # anything but HistorySet\n if self.sourceData[pltIndex].type.strip() != 'HistorySet':\n for i in range(len(self.xCoordinates[pltIndex])):\n xSplit = self._returnSplitIndex('x', pltIndex, i)\n self.xValues[pltIndex][1].append(np.asarray(dataSet[xSplit].values.astype(float, copy=False)))\n if self.yCoordinates:\n for i in range(len(self.yCoordinates[pltIndex])):\n ySplit = self._returnSplitIndex('y', pltIndex, i)\n self.yValues[pltIndex][1].append(np.asarray(dataSet[ySplit.strip()].values.astype(float, copy=False)))\n if self.zCoordinates and self.dim > 2:\n for i in range(len(self.zCoordinates[pltIndex])):\n zSplit = self._returnSplitIndex('z', pltIndex, i)\n self.zValues[pltIndex][1].append(np.asarray(dataSet[zSplit.strip()].values.astype(float, copy=False)))\n if self.clusterLabels:\n for i in range(len(self.clusterLabels[pltIndex])):\n clusterSplit = self._returnSplitIndex('clusterLabels', pltIndex, i)\n self.clusterValues[pltIndex][1].append(np.asarray(dataSet[clusterSplit.strip()].values.astype(float, copy=False)))\n if self.mixtureLabels:\n for i in range(len(self.mixtureLabels[pltIndex])):\n mixtureSplit = self._returnSplitIndex('mixtureLabels', pltIndex, i)\n self.mixtureValues[pltIndex][1].append(np.asarray(dataSet[mixtureSplit.strip()].values.astype(float, copy=False)))\n if self.colorMapCoordinates[pltIndex] is not None:\n for i in range(len(self.colorMapCoordinates[pltIndex])):\n cSplit = self._returnSplitIndex('colorMap', pltIndex, i)\n self.colorMapValues[pltIndex][1].append(np.asarray(dataSet[cSplit.strip()].values.astype(float, copy=False)))\n # check if the array sizes are consistent\n sizeToMatch = self.xValues[pltIndex][1][-1].size\n if self.yCoordinates and self.yValues[pltIndex][1][-1].size != sizeToMatch:\n self.raiseAnError(Exception, f\"<y> variable has a size 
({self.yValues[pltIndex][1][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n if self.zCoordinates and self.dim > 2 and self.zValues[pltIndex][1][-1].size != sizeToMatch:\n self.raiseAnError(Exception, f\"<z> variable has a size ({self.zValues[pltIndex][1][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n if self.colorMapCoordinates[pltIndex] is not None and self.colorMapValues[pltIndex][1][-1].size != sizeToMatch:\n self.raiseAnError(Exception, f\"<colorMap> variable has a size ({self.colorMapValues[pltIndex][1][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n else:\n # HistorySet\n pivotParam = self.sourceData[pltIndex].indexes[0]\n for cnt in range(len(self.sourceData[pltIndex])):\n maxSize = 0\n for i in range(len(self.xCoordinates[pltIndex])):\n xSplit = self._returnSplitIndexHS('x', pltIndex, i)\n # for variable from input space, it will return array(float), not 1d array\n self.xValues[pltIndex][cnt].append(np.atleast_1d(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[xSplit].values.astype(float, copy=False)))\n maxSize = self.xValues[pltIndex][cnt][-1].size if self.xValues[pltIndex][cnt][-1].size > maxSize else maxSize\n if self.yCoordinates:\n for i in range(len(self.yCoordinates[pltIndex])):\n ySplit = self._returnSplitIndexHS('y', pltIndex, i)\n self.yValues[pltIndex][cnt].append(np.atleast_1d(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[ySplit].values.astype(float, copy=False)))\n maxSize = self.yValues[pltIndex][cnt][-1].size if self.yValues[pltIndex][cnt][-1].size > maxSize else maxSize\n if self.zCoordinates and self.dim > 2:\n for i in range(len(self.zCoordinates[pltIndex])):\n zSplit = self._returnSplitIndexHS('z', pltIndex, i)\n self.zValues[pltIndex][cnt].append(np.atleast_1d(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[zSplit].values.astype(float, copy=False)))\n maxSize = self.zValues[pltIndex][cnt][-1].size if self.zValues[pltIndex][cnt][-1].size > maxSize else maxSize\n if self.colorMapCoordinates[pltIndex] is not None:\n for i in range(len(self.colorMapCoordinates[pltIndex])):\n colorSplit = self._returnSplitIndexHS('colorMap', pltIndex, i)\n self.colorMapValues[pltIndex][cnt].append(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[colorSplit].values.astype(float, copy=False))\n maxSize = self.colorMapValues[pltIndex][cnt][-1].size if self.colorMapValues[pltIndex][cnt][-1].size > maxSize else maxSize\n # expand the scalars in case they need to be plotted against histories\n if self.xValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.xValues[pltIndex][cnt][-1] = np.full(maxSize, self.xValues[pltIndex][cnt][-1])\n if self.yCoordinates and self.yValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.yValues[pltIndex][cnt][-1] = np.full(maxSize, self.yValues[pltIndex][cnt][-1])\n if self.zCoordinates and self.dim > 2 and self.zValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.zValues[pltIndex][cnt][-1] = np.full(maxSize, self.zValues[pltIndex][cnt][-1])\n if self.colorMapCoordinates[pltIndex] is not None and self.colorMapValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.colorMapValues[pltIndex][cnt][-1] = np.full(maxSize, self.colorMapValues[pltIndex][cnt][-1])\n # check if the array sizes are consistent\n if self.yCoordinates and self.yValues[pltIndex][cnt][-1].size != maxSize:\n self.raiseAnError(Exception, f\"<y> variable has a size ({self.yValues[pltIndex][cnt][-1].size}) that is not consistent with 
input <x> ({sizeToMatch})\")\n if self.zCoordinates and self.dim > 2 and self.zValues[pltIndex][cnt][-1].size != maxSize:\n self.raiseAnError(Exception, f\"<z> variable has a size ({self.zValues[pltIndex][cnt][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n if self.colorMapCoordinates[pltIndex] is not None and len(self.colorMapValues[pltIndex][cnt][-1]) != maxSize:\n self.raiseAnError(Exception, f\"<colorMap> variable has a size ({self.colorMapValues[pltIndex][cnt][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n\n # check if values have been filled\n if len(self.xValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.xValues[pltIndex]:\n if len(self.xValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.xValues[pltIndex][key])):\n if self.xValues[pltIndex][key][i].size == 0:\n return False\n if self.yCoordinates:\n if len(self.yValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.yValues[pltIndex]:\n if len(self.yValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.yValues[pltIndex][key])):\n if self.yValues[pltIndex][key][i].size == 0:\n return False\n if self.zCoordinates and self.dim > 2:\n if len(self.zValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.zValues[pltIndex]:\n if len(self.zValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][i].size == 0:\n return False\n if self.clusterLabels:\n if len(self.clusterValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.clusterValues[pltIndex]:\n if len(self.clusterValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.clusterValues[pltIndex][key])):\n if self.clusterValues[pltIndex][key][i].size == 0:\n return False\n if self.mixtureLabels:\n if len(self.mixtureValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.mixtureValues[pltIndex]:\n if len(self.mixtureValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.mixtureValues[pltIndex][key])):\n if self.mixtureValues[pltIndex][key][i].size == 0:\n return False\n if self.colorMapCoordinates[pltIndex] is not None:\n if len(self.colorMapValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.colorMapValues[pltIndex]:\n if len(self.colorMapValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.colorMapValues[pltIndex][key])):\n if self.colorMapValues[pltIndex][key][i].size == 0:\n return False\n\n return True", "def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))", "def add_node(self, val):\n if val not in self:\n self.setdefault(val, [])", "def __add_one_day_values__(self):\n values = self.values()\n for value in values:\n ls = []\n if value.label in self.values_dict:\n ls = self.values_dict[value.label]\n ls.append(value)\n else:\n ls = [value]\n self.values_dict[value.label] = ls", "def union(self, otherpvalueset):\n for candidate in otherpvalueset.pvalues.itervalues():\n self.add(candidate)" ]
[ "0.59434026", "0.56864935", "0.561745", "0.55376625", "0.5509779", "0.5458379", "0.5435721", "0.5396941", "0.53683496", "0.5350505", "0.5343917", "0.5340923", "0.5333917", "0.5325144", "0.5264902", "0.5218789", "0.5205872", "0.5203127", "0.5198281", "0.5189773", "0.51723", "0.5169", "0.5164717", "0.5159331", "0.51478124", "0.51366097", "0.51364076", "0.5133444", "0.51286113", "0.5114369" ]
0.7143264
0
Performs a generic find operation on the specified controller and formats the output in color.
def _generic_find(controller, heading, patterns): msg.info(heading) msg.info("--------------------------") msg.blank() for pattern in patterns: for entry in controller.find(pattern): if hasattr(entry, "uuid"): eid = entry.uuid elif hasattr(entry, "fqn"): eid = entry.fqn else: eid = entry.name text = "{} | {} ".format(eid, entry.root) msg.arb(text, [msg.cenum["cwarn"], msg.cenum["cstds"]], '|')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _find_controller(self, controller):\n if controller is None:\n return None\n # If the output specified is a string controller e.g. \"WelcomeController@show\"\n elif isinstance(controller, str):\n if \"@\" in controller:\n controller_path, controller_method_str = controller.split(\"@\")\n else:\n controller_path = controller\n controller_method_str = \"__call__\"\n\n controller_path = modularize(controller_path).split(\".\")\n if len(controller_path) > 1:\n controller_name = controller_path.pop()\n prefix_path = \".\".join(controller_path)\n else:\n controller_name = controller_path[0]\n prefix_path = \"\"\n # build a list of all locations where the controller can be found\n # if the controller is defined such as auth.WelcomeController, append the prefix path to\n # the locations\n locations = list(\n map(\n lambda loc: f\"{loc}.{removeprefix(prefix_path, loc)}\"\n if prefix_path\n else loc,\n self.controllers_locations,\n )\n )\n try:\n self.controller_class = Loader.find(\n Controller, locations, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # controller is an instance with a bound method\n elif hasattr(controller, \"__self__\"):\n _, controller_method_str = controller.__qualname__.split(\".\")\n self.controller_instance = controller.__self__\n\n # it's a class or class.method, we don't have to find it, just get the class\n elif hasattr(controller, \"__qualname__\"):\n if \".\" in controller.__qualname__:\n controller_name, controller_method_str = controller.__qualname__.split(\n \".\"\n )\n else:\n controller_name = controller.__qualname__\n controller_method_str = \"__call__\"\n\n try:\n self.controller_class = Loader.get_object(\n controller.__module__, controller_name, raise_exception=True\n )\n except LoaderNotFound as e:\n self.e = e\n print(f\"\\033[93mTrouble importing controller!\\n> {str(e)}\\033[0m\")\n # it's a controller instance\n else:\n self.controller_instance = controller\n controller_method_str = \"__call__\"\n\n # Set the controller method on class. 
This is a string\n self.controller_method = controller_method_str", "def search_helper():\n\n if request.args.get(\"movie_name\"):\n movie_name = request.args.get(\"movie_name\")\n movie = Movie.query.filter(Movie.name == movie_name).one()\n session['movie'] = movie.name\n\n else:\n print 'RANDOMLY PICKING A MOVIE'\n movie = random.choice(Movie.query.all())\n\n color_list = get_colors_from_movie(movie)\n print 'Originally got colors %s from Movie %s' % (sorted(color_list), movie.name)\n\n result_dict = etsy.get_listing_items(color_list)\n\n print 'Colors returned %s' % (sorted(result_dict['colors']))\n \n best_dict = etsy.get_image_urls(result_dict, movie.id)\n \n (top_listing, bottom_listing, accessory_listing, dress_listing,\n shoe_listing, bag_listing) = etsy.get_listing_urls(best_dict)\n\n print 'returning ' , result_dict['colors']\n return (result_dict['colors'], movie, best_dict, top_listing, bottom_listing, accessory_listing, dress_listing,\n shoe_listing, bag_listing)", "def Find(*args, **kwargs):\n return _gdi_.ColourDatabase_Find(*args, **kwargs)", "def FindColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_FindColor(self, *args)", "def search(self, key, print_path=False):\n search_node = self.get_node(key, print_path)\n print(\"ID: {}\\nValue: {}\\nColor: {}\".format(search_node.key, search_node.value, search_node.get_color()))", "def search_results(self, results):\n for index, item in enumerate(results):\n print '[%s] %s (%s) {%s}' % (\n index, \n self._color(item.title), \n self._color(item.year, 'RED'), \n self._color(item.imdbid, 'GREEN'))", "def command_find(args):\n _perform_environment_check()\n\n filter_dict = _get_find_filter_dict(args)\n _find_verify_arguments(filter_dict)\n\n session = setup_session()\n expanded_queries = _expand_query_list(\n session, args[\"queries\"], True, args[\"verbose\"])\n query_results = retrieve_object_info(session, expanded_queries, \"unsorted\")\n\n filtered_results = _find_filter_results(query_results, filter_dict)\n\n dedup_results = _replica_results_dedup(filtered_results)\n _find_print_results(dedup_results, args[\"print0\"])", "def disp_found(num):\n from x84.bbs import getterminal, echo\n term = getterminal()\n echo(u''.join((u'\\r',\n term.bold_white(u'%d' % (num,)),\n term.yellow(u' lOCAtiON%s diSCOVEREd ' %\n (u's' if num > 1 else u'')),\n term.bold_black(u'...'),)))", "def find(self, **kwargs):\n matches = self.findall(**kwargs)\n num_matches = len(matches)\n if num_matches == 0:\n msg = \"No %s matching %s.\" % (self.resource_class.__name__, kwargs)\n raise exceptions.NotFound(404, msg)\n elif num_matches > 1:\n raise exceptions.NoUniqueMatch\n else:\n return matches[0]", "def send_find(self, statement):\n msg_type, msg = self.protocol.build_find(statement)\n self._execute_prepared_pipeline(msg_type, msg, statement)\n return DocResult(self) if statement.is_doc_based() else RowResult(self)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_find_entity_by_id_action(self):\n pass", "def search(self, key, print_path=False):\r\n _, search_node = self.__compare(key, method='search', print_path=print_path)\r\n if not search_node.key:\r\n print(\"Node doesn't exist!\")\r\n else:\r\n print(\"ID: {}\\nValue: {}\\nColor: {}\".format(search_node.key, search_node.value, search_node.get_color()))", "def search(\n self, color, board, valid_actions, \n output_move_row, output_move_column):\n raise NotImplementedError('You will have to implement this.')", "def find_user_composer() -> FindUserController:\n\n repository = 
UserRepository()\n use_case = FindUser(repository)\n find_user_route = FindUserController(use_case)\n\n return find_user_route", "def show(request, pk, ck):\n\n project_container = get_object_or_404(ProjectContainer, id=pk)\n coding = get_object_or_404(CodingProject, id=ck)\n\n user = get_user(request)\n coder = Person.objects.using('datatracker').get(id=coding.coder)\n if project_container.code_request is None:\n mentor = coder\n else:\n mentor = Person.objects.using('datatracker').get(id=project_container.code_request.mentor)\n\n # According to model areas and working groups should come from documents\n tags = []\n keys = []\n areas = []\n if project_container.docs:\n keys = filter(None, project_container.docs.split(';'))\n docs = list(DocAlias.objects.using('datatracker').filter(name__in=keys).values_list('name', 'document__group__name',\n 'document__group__parent__name'))\n for name, gname, gparentname in docs:\n if gparentname:\n if gparentname not in areas:\n areas.append(gparentname) # use acronym?\n else:\n areas.append(gname)\n tags += coding.tags.all()\n\n if not areas:\n areas = [constants.STRING_NONE]\n if not tags:\n tags = [constants.STRING_NONE]\n\n return render_page(request, constants.TEMPLATE_MATCHES_SHOW, {\n 'projectcontainer': project_container,\n 'coding': coding,\n 'areas': areas,\n 'tags': tags,\n 'docs': docs,\n 'coder': coder,\n 'mentor': mentor,\n 'owner': user,\n 'list_template': constants.TEMPLATE_MATCHES_LIST\n })", "def __find(self):\n txt = self.textCursor().selectedText()\n self.__mainWindow.showFind(txt)", "def _search_print_lines(self, repo_list, lines, fmt):\n for repo in repo_list[\"results\"]:\n if \"is_official\" in repo and repo[\"is_official\"]:\n is_official = \"[OK]\"\n else:\n is_official = \"----\"\n description = \"\"\n for dfield in (\"description\", \"short_description\"):\n if dfield in repo and repo[dfield] is not None:\n for char in repo[dfield]:\n if char == '\\n':\n break\n if char in string.printable:\n description += char\n break\n name = \"\"\n for nfield in (\"name\", \"repo_name\"):\n if nfield in repo and repo[nfield] is not None:\n name = repo[nfield]\n break\n stars = \"\"\n if \"star_count\" in repo and repo[\"star_count\"] is not None:\n stars = str(repo[\"star_count\"])\n Msg().out(fmt % (name, is_official, description, stars))\n lines -= 1\n if not lines:\n break", "def vcard_find(cmd, *args):\n if len(args) == 0:\n return vcard_find.__doc__\n if len(args) > 0:\n regStr = ' '.join(args)\n cfg = get_config()\n\n # no options, just print all\n matches = sets.Set()\n files = glob.glob(cfg['vcard_dir'] + '/*.vcf')\n for f in files:\n with open(f, \"r\") as fh:\n data = fh.read()\n match = re.search(re.escape(regStr), data, re.IGNORECASE)\n if match is not None:\n nick = f[len(cfg['vcard_dir']) + 1:\n -4] # strip dir portions\n matches.add(nick)\n return ',\\t'.join(matches)", "def showFind(self, txt=\"\"):\n self.__searchWidget.showFind(txt)", "def do_show(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['all_projects'] = args.all_projects\n opts = zun_utils.remove_null_parms(**opts)\n container = cs.containers.get(**opts)\n if args.format == 'json':\n print(jsonutils.dumps(container._info, indent=4, sort_keys=True))\n elif args.format == 'yaml':\n print(yaml.safe_dump(container._info, default_flow_style=False))\n elif args.format == 'table':\n _show_container(container)", "def scan(controller, path):", "def find_resource(manager, name_or_id):\n # first try to get entity as integer id\n try:\n if isinstance(name_or_id, 
int) or name_or_id.isdigit():\n return manager.get(int(name_or_id))\n except exceptions.NotFound:\n pass\n\n # now try to get entity as uuid\n try:\n uuid.UUID(str(name_or_id))\n return manager.get(name_or_id)\n except (ValueError, exceptions.NotFound):\n pass\n\n # finally try to find entity by name\n try:\n return manager.find(name=name_or_id)\n except exceptions.NotFound:\n msg = \"No %s with a name or ID of '%s' exists.\" % \\\n (manager.resource_class.__name__.lower(), name_or_id)\n raise exceptions.CommandError(msg)", "def find(self, **kwargs):\n rl = self.findall(**kwargs)\n num = len(rl)\n\n if num == 0:\n msg = \"No %s matching %s.\" % (self.resource_class.__name__, kwargs)\n raise exceptions.NotFound(msg)\n elif num > 1:\n raise exceptions.NoUniqueMatch\n else:\n return self.get(rl[0].id)", "def find(self, term):\r\n params = base.get_params(None, locals())\r\n url = '{0}/find'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def find(self, term):\r\n params = base.get_params(None, locals())\r\n url = '{0}/find'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def info(controller_id=None):\n if controller_id is None:\n try:\n return [c.get_info() for c in controller.get_controllers()]\n except:\n return {\"status\": \"Failed to get controllers information\"}\n else:\n try:\n return controller.Controller(controller_id).get_info()\n except:\n return {\"controller\": controller_id,\n \"status\": \"Failed to get controller information\"}", "def show_controller(cls, args, config):\n if len(args) == 0:\n raise MOLNSException(\"USAGE: molns controller show name\")\n return {'msg': str(config.get_object(name=args[0], kind='Controller'))}", "def find(request):\n if request.method == \"POST\":\n if request.POST.get(\"find\"):\n topics = request.db[\"topic\"].find(\n {\"title\": {\"$regex\": request.POST.get(\"find\"), \"$options\": \"i\"}}\n )\n else:\n topics = False\n\n return render_to_response(\n \"templates/search.html\",\n {\"topics\": topics, \"count\": count(request)},\n request=request,\n )\n return HTTPFound(location=\"/\")", "def search(self, needle, haystack):\n\n # if a highlight color is not configured, exit early\n if not self._config.cheat_highlight:\n return haystack\n\n # otherwise, attempt to import the termcolor library\n try:\n from termcolor import colored\n\n # if the import fails, return uncolored text\n except ImportError:\n return haystack\n\n # if the import succeeds, colorize the needle in haystack\n return haystack.replace(needle,\n colored(needle, self._config.cheat_highlight))", "def displayColor(*args, active: bool=True, create: bool=True, dormant: bool=True, list:\n bool=True, queryIndex: int=0, resetToFactory: bool=True, resetToSaved:\n bool=True, q=True, query=True, **kwargs)->Union[None, Any]:\n pass" ]
[ "0.53260326", "0.52767843", "0.524204", "0.51859134", "0.5096391", "0.50732815", "0.5005718", "0.49852437", "0.4893614", "0.4848869", "0.481354", "0.4778124", "0.4775718", "0.47399512", "0.47129112", "0.47054514", "0.46476057", "0.46466988", "0.4642668", "0.4633068", "0.46292138", "0.4622017", "0.46130627", "0.45725158", "0.45725158", "0.45513353", "0.45420447", "0.45343053", "0.45014375", "0.44815132" ]
0.6787234
0
Test creation of a Video and writing/reading to the videocategory table. This test assumes that there are no entries in the videocategory table! All entries in that table will be deleted!
def test_Dataheap_Video_004_01(self): class VideoCategory(DBDataWrite): """ VideoCategory(data=None, db=None) --> VideoCategory object to database table 'videocategory', data is a `videocategory` string. - get information about the table: $ mysql -h <master-backend-ip> -u mythtv -p<password-from-config.xml> mythconverg MariaDB [mythconverg]> describe videocategory; +-------------+-------------+------+-----+---------+----------------+ | Field | Type | Null | Key | Default | Extra | +-------------+-------------+------+-----+---------+----------------+ | intid | int(10) | NO | PRI | NULL | auto_increment | | category | varchar(128)| NO | | | | +-------------+-------------+------+-----+---------+----------------+ 2 rows in set (0.00 sec) """ _table = 'videocategory' _key = ['category'] _defaults = {'category' : ''} ### end class VideoCategory title = u"Le Dernier Métro" filename = title + u".mkv" vid = Video(db=self.mydb).create ({'title' : title, 'filename': filename, 'host' : self.mydb.getMasterBackend()}) vid.category = u"python_test" vid.update() # find this video and check it vids = self.mydb.searchVideos( title = title ) vid_r = next(vids) # print(vid_r.category) self.assertEqual(vid.category, vid_r.category) print(repr(vid_r)) print(str(vid_r)) # delete the video previously created vid.delete() # delete the previously assigned category vid_category = VideoCategory(u"python_test", db = self.mydb) vid_category.delete() # Although, everything is deletes, the 'autoincrement' values are still updated...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_video_model_functionality(self):\n\n # create a video\n v = Video(title=\"Title of a YouTube Video\", description=\"Description of a YouTube Video\", yt_video_id=\"yfoY53QXEnI\", yt_channel_id=\"UC29ju8bIPH5as8OGnQzwJyA\")\n db.session.add(v)\n db.session.commit()\n\n # video should exist\n self.assertTrue(v)\n\n # video title should be correct in db\n v=Video.query.get(1)\n self.assertEqual(v.title, \"Title of a YouTube Video\")\n\n # there should be exactly one video in the db\n v=Video.query.all()\n self.assertEqual(len(v), 1)", "def test_upload_video(self):\n with self.client:\n path = '../data/example.mp4'\n path = os.path.join(os.path.dirname(__file__), path)\n with open(os.path.abspath(path), 'rb') as file:\n data = dict(file=(file, 'example.mp4'))\n response = self.client.post('/upload',\n content_type='multipart/form-data',\n data=data)\n file.close()\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertIn('example.mp4', os.listdir(VIDEOS_SAVE_PATH))\n self.assertIn('example.mp4', [video.filename for video in Video.query.all()])", "def test_video_delete(self):\n v1, v2 = make_video(media_id='1234'), make_video(media_id='2345')\n set_resources_and_sync([v1, v2])\n i1 = mpmodels.MediaItem.objects.get(jwp__key=v1.key)\n i2 = mpmodels.MediaItem.objects.get(jwp__key=v2.key)\n set_resources_and_sync([v1])\n self.assertIsNone(mpmodels.MediaItem.objects.get(id=i1.id).deleted_at)\n self.assertIsNotNone(mpmodels.MediaItem.objects_including_deleted.get(id=i2.id).deleted_at)\n self.assertFalse(mpmodels.MediaItem.objects.filter(id=i2.id).exists())", "def test_one_video(self):\n\n # Add video and channel\n self.create_channel('test-channel', 'test123')\n self.create_video('test-channel', 'test123', 'my-video', 'My Video')\n\n # There should be one channel\n response = self.client.get('/api/videos/')\n self.assertEquals(response.json['total-videos'], 1)\n self.assertEquals(len(response.json['videos']), 1)", "def test_basic_functionality(self):\n self.assertEqual(mpmodels.MediaItem.objects.count(), 0)\n video = make_video(media_id='1234', title='test title')\n set_resources_and_sync([video])\n self.assertEqual(mpmodels.MediaItem.objects.count(), 1)\n item = mpmodels.MediaItem.objects.get(jwp__key=video.key)\n self.assertEqual(item.title, 'test title')", "def test_create_youtube(self):\n # Count the number of records before the save\n existing_records_count = Track.objects.all().count()\n post_data = {\n 'source_type': 'youtube',\n 'source_id': 'StTqXEQ2l-Y',\n }\n\n with transaction.atomic():\n resp = self.api_client.post('/api/metadata/tracks/', data=post_data)\n\n new_records_count = Track.objects.all().count()\n # Ensure request was successful\n self.assertEqual(resp.status_code, 200)\n # Ensure a new record was created in the database\n self.assertEqual(existing_records_count+1, new_records_count)", "def test_api_video_create_by_playlist_admin(self):\n user = factories.UserFactory()\n playlist = factories.PlaylistFactory()\n factories.PlaylistAccessFactory(\n role=models.ADMINISTRATOR, playlist=playlist, user=user\n )\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n self.assertEqual(models.Video.objects.count(), 0)\n\n response = self.client.post(\n \"/api/videos/\",\n {\n \"lti_id\": \"video_one\",\n \"playlist\": str(playlist.id),\n \"title\": \"Some video\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n 
self.assertEqual(models.Video.objects.count(), 1)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(\n response.json(),\n {\n \"active_stamp\": None,\n \"description\": \"\",\n \"has_transcript\": False,\n \"id\": str(models.Video.objects.get().id),\n \"is_ready_to_show\": False,\n \"live_info\": {},\n \"live_state\": None,\n \"live_type\": None,\n \"playlist\": {\n \"id\": str(playlist.id),\n \"lti_id\": playlist.lti_id,\n \"title\": playlist.title,\n },\n \"should_use_subtitle_as_transcript\": False,\n \"show_download\": True,\n \"thumbnail\": None,\n \"timed_text_tracks\": [],\n \"title\": \"Some video\",\n \"upload_state\": \"pending\",\n \"urls\": None,\n \"xmpp\": None,\n },\n )", "def test_api_video_create_for_nonexistent_playlist(self):\n user = factories.UserFactory()\n some_uuid = uuid.uuid4()\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n self.assertEqual(models.Video.objects.count(), 0)\n\n response = self.client.post(\n \"/api/videos/\",\n {\"lti_id\": \"video_one\", \"playlist\": some_uuid, \"title\": \"Some video\"},\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(models.Video.objects.count(), 0)\n self.assertEqual(response.status_code, 403)", "def upload_video(self, video_ID, username, title): #WORKS\n try:\n view_count = 0\n self.cur.execute(\"INSERT INTO videos VALUES(\\\"{}\\\", \\\"{}\\\", \\\"{}\\\", {}, NULL)\".format(video_ID, title, username, view_count))\n self.db.commit()\n except:\n self.db.rollback()", "def test_model_can_create_a_film(self):\n self.assertEqual(self.film.title, \"test_a\")", "def test_video_thumbnail(self):\n data = TestData().load_users().load_galleries()\n album = data.gallery_b7w.top_album\n\n video1 = Video.objects.create(album=album, uid='56433514', type=Video.VIMIO)\n VideoController.thumbnail_url(video1)\n self.assertHttpOk(VideoController.thumbnail_url(video1))\n\n video2 = Video.objects.create(album=album, uid='7dGGPlZlPQw', type=Video.YOUTUBE)\n self.assertHttpOk(VideoController.thumbnail_url(video2))", "def main(video_folder, database_location):\n \n # Validate parameters\n if not path.isdir(video_folder):\n print 'Invalid video directory path: {}'.format(video_folder)\n return\n \n if not path.exists(database_location):\n print 'Invalid database location: {}'.format(database_location)\n return\n \n # Connect to database\n conn = sqlite3.connect(database_location)\n cursor = conn.cursor()\n\n # Get data\n video_files = listdir(video_folder)\n video_ids = [f.split('.')[0] for f in video_files if f.endswith('.avi')]\n \n # Stats\n true_positives = 0 # Category 1 classified as category 1\n false_positives = 0 # Categories 2 or 3 classified as category 1\n true_negatives = 0 # Categories 2 or 3 classified as categories 2 or 3\n false_negatives = 0 # Category 1 classified as categories 2 or 3\n\n for video_id in video_ids:\n \n # Make sure both a .avi and .wav file are available for this id\n video_location = path.join(video_folder, '{}.avi'.format(video_id))\n audio_location = path.join(video_folder, '{}.wav'.format(video_id))\n \n if not path.exists(video_location) or not path.exists(audio_location):\n continue\n \n # Process video\n print 'Processing `{}`...'.format(video_id) \n category_classifier = process_video.process(\n video_location,\n audio_location,\n database_location\n )\n\n # Get actual label\n metadata = video_id.split('-')\n db_video_id = 
''.join(metadata[:-2])\n db_start_time_ms = metadata[-2]\n\n query = '''\n SELECT category FROM classifications\n WHERE video_id = ?\n AND start_time_ms = ?\n '''\n cursor.execute(query, (db_video_id, db_start_time_ms))\n result = cursor.fetchone()\n \n # (Skip if the video isn't labeled)\n if result is None:\n print 'No label - skipped'\n continue\n \n category_actual = result[0]\n\n # Mark as TP, FP, TN, or FN\n if category_classifier == 1:\n if category_actual == 1:\n true_positives += 1\n mark = 'TP'\n else:\n false_positives += 1\n mark = 'FP'\n else:\n if category_actual == 1:\n false_negatives += 1\n mark = 'FN'\n else:\n true_negatives += 1\n mark = 'TN'\n \n print 'Classified as category {} ({})'.format(\n category_classifier,\n mark\n )\n \n print\n\n print\n\n print 'RESULTS:'\n print\n print 'True Positives (TP) =', true_positives\n print 'False Positives (FP) =', false_positives\n print 'True Negatives (TN) =', true_negatives\n print 'False Negatives (FN) =', false_negatives\n\n print\n print 'Recall [TP / (TP + FN)] =', float(true_positives) / (true_positives + false_negatives)\n print 'Precision [TP / (TP + FP)] =', float(true_positives) / (true_positives + false_positives)", "def test_api_video_create_by_playlist_admin_missing_title(self):\n user = factories.UserFactory()\n playlist = factories.PlaylistFactory()\n factories.PlaylistAccessFactory(\n role=models.ADMINISTRATOR, playlist=playlist, user=user\n )\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n self.assertEqual(models.Video.objects.count(), 0)\n\n response = self.client.post(\n \"/api/videos/\",\n {\"playlist\": str(playlist.id)},\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(models.Video.objects.count(), 0)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n response.json(),\n {\"errors\": [{\"title\": [\"This field is required.\"]}]},\n )", "def test_video_constructor(self):\r\n context = self.item_descriptor.render('student_view').content\r\n\r\n sources = {\r\n 'main': u'example.mp4',\r\n u'mp4': u'example.mp4',\r\n u'webm': u'example.webm',\r\n }\r\n\r\n expected_context = {\r\n 'ajax_url': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',\r\n 'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', False),\r\n 'data_dir': getattr(self, 'data_dir', None),\r\n 'display_name': u'A Name',\r\n 'end': 3610.0,\r\n 'id': self.item_descriptor.location.html_id(),\r\n 'show_captions': 'true',\r\n 'handout': None,\r\n 'sources': sources,\r\n 'speed': 'null',\r\n 'general_speed': 1.0,\r\n 'start': 3603.0,\r\n 'saved_video_position': 0.0,\r\n 'sub': u'a_sub_file.srt.sjson',\r\n 'track': None,\r\n 'youtube_streams': create_youtube_string(self.item_descriptor),\r\n 'yt_test_timeout': 1500,\r\n 'yt_api_url': 'www.youtube.com/iframe_api',\r\n 'yt_test_url': 'gdata.youtube.com/feeds/api/videos/',\r\n 'transcript_download_format': 'srt',\r\n 'transcript_download_formats_list': [{'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'}],\r\n 'transcript_language': u'en',\r\n 'transcript_languages': json.dumps(OrderedDict({\"en\": \"English\", \"uk\": u\"Українська\"})),\r\n 'transcript_translation_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'translation'\r\n ).rstrip('/?'),\r\n 'transcript_available_translations_url': 
self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'available_translations'\r\n ).rstrip('/?'),\r\n }\r\n\r\n self.assertEqual(\r\n context,\r\n self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),\r\n )", "def test_create_episode(self):\n episode = self._create_sample_episode()\n\n self.assertEqual(\n self.storage.get_episode(episode.study_id, episode.session_id,\n episode.id), episode)", "def test_api_video_create_by_organization_admin(self):\n user = factories.UserFactory()\n organization = factories.OrganizationFactory()\n factories.OrganizationAccessFactory(\n role=models.ADMINISTRATOR, organization=organization, user=user\n )\n playlist = factories.PlaylistFactory(organization=organization)\n\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(user.id)\n jwt_token.payload[\"user\"] = {\n \"id\": str(user.id),\n \"username\": user.username,\n }\n\n self.assertEqual(models.Video.objects.count(), 0)\n\n response = self.client.post(\n \"/api/videos/\",\n {\n \"lti_id\": \"video_one\",\n \"playlist\": str(playlist.id),\n \"title\": \"Some video\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(models.Video.objects.count(), 1)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(\n response.json(),\n {\n \"active_stamp\": None,\n \"description\": \"\",\n \"has_transcript\": False,\n \"id\": str(models.Video.objects.get().id),\n \"is_ready_to_show\": False,\n \"live_info\": {},\n \"live_state\": None,\n \"live_type\": None,\n \"playlist\": {\n \"id\": str(playlist.id),\n \"lti_id\": playlist.lti_id,\n \"title\": playlist.title,\n },\n \"should_use_subtitle_as_transcript\": False,\n \"show_download\": True,\n \"thumbnail\": None,\n \"timed_text_tracks\": [],\n \"title\": \"Some video\",\n \"upload_state\": \"pending\",\n \"urls\": None,\n \"xmpp\": None,\n },\n )", "def test_one_video(self, mock_filename, mock_url_name):\n mock_filename.return_value = 'filename'\n mock_url_name.return_value = 'localhost'\n p = Post.objects.get(pk=self.post3_id)\n v = Video.objects.get(pk=self.video2_id)\n v.file.save('filename', BytesIO(b'file'), save=True)\n serialized_data = PostSerializer(p).data\n\n self.assertEqual(serialized_data['message'], self.post3_message)\n self.assertEqual(serialized_data['fund']['name'], self.fund_name)\n self.assertEqual(serialized_data['id'], self.post3_id)\n self.assertLess(\n datetime.strptime(serialized_data['created_at'], '%Y-%m-%dT%H:%M:%S.%f%z'),\n datetime.now(timezone.utc)\n )\n self.assertEqual(serialized_data['videos'][0]['url'], 'localhost')\n self.assertEqual(serialized_data['videos'][0]['id'], self.video2_id)\n self.assertEqual(serialized_data['videos'][0]['description'], self.video2_description)\n self.assertLess(\n datetime.strptime(serialized_data['created_at'], '%Y-%m-%dT%H:%M:%S.%f%z'),\n datetime.strptime(serialized_data['videos'][0]['created_at'], '%Y-%m-%dT%H:%M:%S.%f%z')\n )", "def test_category_save(database):\n category = Category(title=\"Test Category\")\n category.save()\n\n assert category.title == \"Test Category\"", "def test_video_removal(self):\n edx_video_id = 'test1'\n remove_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n response = self.client.delete(remove_url, HTTP_ACCEPT=\"application/json\")\n self.assertEqual(response.status_code, 204)\n\n self._assert_video_removal(self.url, edx_video_id, 1)", "def insert_video(videoId, rowData):\n video = VideoData(\n id = videoId,\n 
title = rowData['title'],\n description = rowData['description'],\n channelTitle = rowData['channelTitle'],\n publishedAt = fromIsoToDateTime(rowData['publishedAt']),\n )\n \n try:\n video.save()\n except IntegrityError as e:\n return None\n\n insert_thumbnail(rowData['thumbnails']['default'],'DE',video)\n insert_thumbnail(rowData['thumbnails']['medium'],'ME',video)\n insert_thumbnail(rowData['thumbnails']['high'],'HI',video)\n\n return video", "def test_video_constructor(self):\r\n sources = {\r\n 'main': u'example.mp4',\r\n u'mp4': u'example.mp4',\r\n u'webm': u'example.webm',\r\n }\r\n\r\n context = self.item_descriptor.render('student_view').content\r\n\r\n expected_context = {\r\n 'ajax_url': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state',\r\n 'data_dir': getattr(self, 'data_dir', None),\r\n 'show_captions': 'true',\r\n 'handout': None,\r\n 'display_name': u'A Name',\r\n 'end': 3610.0,\r\n 'id': self.item_descriptor.location.html_id(),\r\n 'sources': sources,\r\n 'speed': 'null',\r\n 'general_speed': 1.0,\r\n 'start': 3603.0,\r\n 'saved_video_position': 0.0,\r\n 'sub': u'a_sub_file.srt.sjson',\r\n 'track': None,\r\n 'youtube_streams': '1.00:OEoXaMPEzfM',\r\n 'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', True),\r\n 'yt_test_timeout': 1500,\r\n 'yt_api_url': 'www.youtube.com/iframe_api',\r\n 'yt_test_url': 'gdata.youtube.com/feeds/api/videos/',\r\n 'transcript_download_format': 'srt',\r\n 'transcript_download_formats_list': [{'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'}],\r\n 'transcript_language': u'en',\r\n 'transcript_languages': '{\"en\": \"English\"}',\r\n 'transcript_translation_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'translation'\r\n ).rstrip('/?'),\r\n 'transcript_available_translations_url': self.item_descriptor.xmodule_runtime.handler_url(\r\n self.item_descriptor, 'transcript', 'available_translations'\r\n ).rstrip('/?')\r\n }\r\n\r\n self.assertEqual(\r\n context,\r\n self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context),\r\n )", "def test_create_movie_with_tags(self):\n tag1 = sample_tag(user=self.user, name='Tag 1')\n tag2 = sample_tag(user=self.user, name='Tag 2')\n payload = {\n 'title': 'Test movie with two tags',\n 'tags': [tag1.id, tag2.id],\n 'time_minutes': 30,\n 'ticket_price_USD': 10.00\n }\n res = self.client.post(MOVIES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n movie = Movie.objects.get(id=res.data['id'])\n tags = movie.tags.all()\n self.assertEqual(tags.count(), 2)\n self.assertIn(tag1, tags)\n self.assertIn(tag2, tags)", "def test_create_movie(self):\n # this functionality requires the executive producer token\n res = self.client().post('/movies', headers={\n 'Authorization': \"Bearer {}\".format(self.executive_producer_token)\n }, json=self.VALID_NEW_MOVIE)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 201)\n self.assertTrue(data[\"success\"])\n self.assertIn('created', data)", "def test_video_removal_multiple_courses(self):\n # remove video from course1\n edx_video_id = 'test1'\n remove_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n response = self.client.delete(remove_url, HTTP_ACCEPT=\"application/json\")\n self.assertEqual(response.status_code, 204)\n\n # verify that video is only deleted from course1 only\n self._assert_video_removal(self.url, edx_video_id, 1)\n 
self._assert_video_removal(self.get_url_for_course_key(self.course2.id), edx_video_id, 0)", "def test_create_category(self):\n pass", "def test_api_video_create_token_user_playlist_preexists(self):\n jwt_token = AccessToken()\n response = self.client.post(\n \"/api/videos/\", HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\"\n )\n self.assertEqual(response.status_code, 401)\n self.assertFalse(models.Video.objects.exists())", "def test_no_video_image(self):\n edx_video_id = 'test1'\n get_videos_url = reverse_course_url('videos_handler', self.course.id)\n video_image_upload_url = self.get_url_for_course_key(self.course.id, {'edx_video_id': edx_video_id})\n with make_image_file(\n dimensions=(settings.VIDEO_IMAGE_MIN_WIDTH, settings.VIDEO_IMAGE_MIN_HEIGHT),\n ) as image_file:\n self.client.post(video_image_upload_url, {'file': image_file}, format='multipart')\n\n val_image_url = get_course_video_image_url(course_id=self.course.id, edx_video_id=edx_video_id)\n\n response = self.client.get_json(get_videos_url)\n self.assertEqual(response.status_code, 200)\n response_videos = json.loads(response.content.decode('utf-8'))[\"videos\"]\n for response_video in response_videos:\n if response_video['edx_video_id'] == edx_video_id:\n self.assertEqual(response_video['course_video_image_url'], val_image_url)\n else:\n self.assertEqual(response_video['course_video_image_url'], None)", "def test_api_video_create_anonymous(self):\n response = self.client.post(\"/api/videos/\")\n self.assertEqual(response.status_code, 401)\n self.assertFalse(models.Video.objects.exists())", "def test_recreate_deleted_item(self):\n v1 = make_video(media_id='1234', title='testing')\n set_resources_and_sync([v1])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n self.assertEqual(i1.title, 'testing')\n i1.delete()\n\n set_resources_and_sync([v1])\n i1 = mpmodels.MediaItem.objects.filter(jwp__key=v1.key).first()\n self.assertIsNotNone(i1)\n self.assertEqual(i1.title, 'testing')", "def test_type_youtube():\n resource = models.MediaResource(youtube_id=\"dQw4w9WgXcQ\")\n\n assert resource.type == models.MediaResource.TYPE_YOUTUBE" ]
[ "0.7072598", "0.66031647", "0.656258", "0.65525234", "0.65179473", "0.631983", "0.6282366", "0.6220394", "0.6195253", "0.6144781", "0.60975116", "0.60858005", "0.60748005", "0.6064746", "0.6035256", "0.60066295", "0.59810054", "0.59662133", "0.5925972", "0.58940107", "0.58623666", "0.58600956", "0.58546597", "0.58455294", "0.58403933", "0.58266777", "0.58238506", "0.5812392", "0.58112645", "0.5801074" ]
0.7127327
0
Make sure entered title does not already exist
def title_exists(form, field): if Entry.select().where(Entry.title ** field.data).exists(): raise ValidationError('That title is already in use.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_title_input(title):\n if len(title) != 0:\n clear()\n return True\n\n else:\n clear()\n print('** Please enter a task title **')\n return False", "def checkTitle(self,event=None):\r\n if self.title.getVal() not in self.titleList:\r\n self.titleList.append(self.title.getVal())\r\n self.titleList.sort()\r\n self.title.updateVals(self.titleList)", "def set_title(self, title):\n if check_data_exist(title) is True:\n self.title = title.text", "def try_create_uniqe_title(self,title,owner):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,owner):\n return new_title\n return False\n else:\n return False", "def enter_title():\n valid_data = False\n # used to keep track of the values and change them in other scopes\n input_data = {'title': ''}\n\n while not valid_data:\n input_data['title'] = get_input(\"Title of the task: \")\n if re.match('[\\w]+', input_data['title']):\n valid_data = True\n clean_scr()\n\n return input_data['title']", "def try_create_uniqe_title(self,title,plan_id):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,plan_id):\n return new_title\n return False\n else:\n return False", "def validateTitle(title):\n \n if not(title) or not(title.strip()):\n return \"You must supply a title.\"\n else:\n return None", "def _check_field_title_duplicate(self, doi: Doi):\n query_criterias = {\"title\": [doi.title]}\n\n # Query database for rows with given title value.\n columns, rows = self._database_obj.select_latest_rows(query_criterias)\n\n # keep rows with same title BUT different identifier\n rows_with_different_identifier = [row for row in rows if row[columns.index(\"identifier\")] != doi.pds_identifier]\n\n if rows_with_different_identifier:\n identifiers = \",\".join([row[columns.index(\"identifier\")] for row in rows_with_different_identifier])\n status = \",\".join([row[columns.index(\"status\")] for row in rows_with_different_identifier])\n dois = \",\".join([row[columns.index(\"doi\")] for row in rows_with_different_identifier])\n\n msg = (\n f\"The title '{doi.title}' has already been used for records \"\n f\"{identifiers}, status: {status}, doi: {dois}. \"\n \"A different title should be used.\\nIf you want to bypass this \"\n \"check, rerun the command with the --force flag provided.\"\n )\n\n raise DuplicatedTitleDOIException(msg)", "def __Verify(self):\n if not self.title:\n raise db.Error('Missing title; required.')", "def _validate_title(self, attribute: attr.Attribute, value: str):\n\n if not isinstance(value, str) or len(value) <= 0:\n raise ValueError(\n f\"Window title must be a non-empty string, received {value!r}\"\n )", "def check_title(self):\n currenttitle = self.driver.title\n assert self.TITLE in currenttitle, 'Title not expected. 
Actual: ' + currenttitle + ', Expected: ' + self.TITLE", "def test_required_fields_title(self):\n\n del self.validator.adata.uns[\"title\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors, [\"ERROR: 'title' in 'uns' is not present.\"]\n )", "def valid_title(self, title):\n if title in self.timers.keys() and isinstance(title, str) and self.timers[title]['count']>0:\n return True\n else:\n return False", "def get_new_artwork_name():\n artwork_name = input('Please enter title of artwork: ')\n while not controls_utils.artwork_name_is_unique(artwork_name):\n print('Artwork name is taken')\n artwork_name = input('Please enter title of artwork: ')\n return artwork_name", "def set_title(self, title):\n \n self.name = title or \"\"", "def test_validate_title_identical(self):\n with self.assertRaises(ValidationError):\n self.make_project(\n title='TestCategory',\n type=PROJECT_TYPE_PROJECT,\n parent=self.category,\n )", "def clean(self):\n if self.category is None and type(self)._default_manager.filter(\n category = self.category,\n title = self.title\n ).exclude(pk=self.pk).exists():\n raise self.unique_error_message(self.__class__, ('category', 'title'))", "def handle_title(self, tag, attrs):\n self.title = 'present'", "def set_title(self, title):\n\t\tpass", "def titleValidator(self, title):\n if type(title) != str:\n API.abort(400, error_messages[11]['Int_title'])\n\n # check if the contents of title have characters between a-z and A-Z\n elif not re.match(r\"(^[a-zA-Z_]+$)\", title) or title.isspace():\n API.abort(\n 400, error_messages[12]['wrong_format_title'])\n\n return True", "def change_title(self, title):\n if isinstance(title, str):\n self.title = title\n elif title is None:\n self.title = None\n else:\n raise TypeError('str expect, not {}'.format(type(title)))", "def is_bad_title(title):\n bad_examples = [\"under construction\", \"test page\", \"redirect\", \"index of\", \"none \", \"expired\", \"coming soon\",\n \"error \", \"domain pending\", \"at directnic\", \"pending validation\", \"website disabled\",\n \"US Zip Code Information\", # verified we need this, urls like 00000.us, 00001.us end up at zipcode.com\n \"domain default page\", \"non-existent domain\", \"v-webs hosting services\",\n \"be back soon\", \"something went wrong\", \"Lunarpages Web Hosting Placeholder Page\",\n \"Félicitations ! 
Votre domaine a bien été créé chez OVH !\", \"Domaine r&eacute;serv&eacute;\",\n \" - For Sale | Undeveloped\", \"Yahoo&#39;s Aabaco Small Business: Websites, Ecommerce, Email &amp; Local Listings\",\n \"service unavailable\", \"website disabled\", \"404 Not Found\", \"Not Found\", \"Page cannot be found\"\n ]\n for bad_title in bad_examples:\n if bad_title.lower() in title.lower():\n debug(bad_title)\n return hit(bad_title)\n\n exact_matches = [\"web hosting\", \"webhosting\"]\n for ma in exact_matches:\n if title.replace(\" \", \"\").replace(\"\\t\", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\").lower() == ma:\n debug(ma)\n return hit(ma)\n return False", "def _checkPlaceholderText (self):\n # If the text entered is the placeholder text, simply remove it:\n if self._nameEntry.get() == NPKR_ENTRY_INITIAL_TEXT:\n self._nameEntry.set(\"\")", "def check_if_already_exists(list_name, title, description):\n\n for item in list_name:\n if item['title'] == title:\n return 'Sorry, This title has already been used in another question'\n if item['description'] == description:\n return 'Sorry, This description has already been used in another question'", "def update_title(self, title):\n if type(title) != str:\n raise Exception(\"title is not a string\")\n\n self.__title_var.set(title)", "def check_valid_title(title):\n title_issues = TitleIssues(title_contains_nsfw=title_contains_nsfw(title))\n return title_issues", "def found_title(newtitle=None):\n global title, body\n if title and len(body):\n print(title)\n if title not in ('DEDICATED', 'INTRODUCTION'):\n body = parse_body(body)\n thebook.append(Section(title, body))\n body = []\n title = newtitle", "def edit_title(self, task, new_title):\n raise ValueError(\"cannot edit title in 'In Progress' status\")", "def test_Entry_title(self):\n test_entry = self.create_Entry()\n self.assertTrue(test_entry.title == str(test_entry))", "def test_title(self):\n\n # list instead of string\n self.validator.adata.uns[\"title\"] = [\"title\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: '['title']' in 'uns['title']' is not valid, \"\n \"it must be a string.\"\n ],\n )" ]
[ "0.76412404", "0.758198", "0.73525643", "0.72878885", "0.69870245", "0.6916602", "0.6883601", "0.6866875", "0.6806847", "0.6769855", "0.66781723", "0.6619108", "0.64249706", "0.64171153", "0.63547355", "0.6336048", "0.63264096", "0.63135356", "0.6247816", "0.62238014", "0.6174995", "0.61670893", "0.6163757", "0.6052932", "0.6047032", "0.60465956", "0.60113454", "0.60093594", "0.59871066", "0.59381545" ]
0.79909974
0
function to detect face and classify whether a mask is worn or not
def detectFaceAndClassify(faceNet, faceMaskClassifier, testImagePath, threshold): # load the input test image from disk image = cv2.imread(testImagePath) # making a copy of image and finding the image spatial dimensions orig = image.copy() (h, w) = image.shape[:2] # construct a blob from the image to pass to the network # using standard weights for the face detection model for image preprocessing blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0)) # obtain the face detections by passing the blob through the network print("computing face detections...") faceNet.setInput(blob) faceDetections = faceNet.forward() # loop over the detections to classify them and form bounding boxes and labels for i in range(0, faceDetections.shape[2]): # extract only confident detections using the confidence/probability # associated with the detection confidence = faceDetections[0, 0, i, 2] # filter out weak detections by ensuring the confidence is # greater than the minimum confidence 0.5 or input variable if confidence > threshold: # extract bounding box dimensions and face Region of intrest for classification faceROI, startX, startY, endX, endY = extractBoxAndFaceROI(image, faceDetections, itemNum=i, height=h, width=w) faceROI = np.expand_dims(faceROI, axis=0) # Passing the pre-processed image with classification model to check if there is a mask or not (mask, withoutMask) = faceMaskClassifier.predict(faceROI)[0] # (mask, withoutMask) = faceMaskClassifier.predict(faceROI) # find the class and associated colour to use for the bounding box and text label = "Mask" if mask > withoutMask else "No Mask" color = (0, 255, 0) if label == "Mask" else (0, 0, 255) # include the probability of prediction in the label of the bounding box label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100) # forming bounding box rectangle and display the label the output image frame cv2.putText(image, label, (startX, startY - 10), cv2.FONT_HERSHEY_COMPLEX, 0.45, color, 2) cv2.rectangle(image, (startX, startY), (endX, endY), color, 2) # show the output image cv2.imshow("Output", image) # display the image still a key is pressed, when key is pressed program is terminated cv2.waitKey(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def face_detect(sess, net, image_name):\n\n\t# Load the demo image\n\tim_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n\tim = cv2.imread(im_file)\n\n\t# Detect all object classes and regress object bounds\n\ttimer = Timer()\n\ttimer.tic()\n\t# scores, boxes = im_detect(sess, net, im)\n\tscores, boxes, eyes, smiles = im_detect_ori(sess, net, im)\n\ttimer.toc()\n\tprint ('Detection took {:.3f}s for '\n\t\t\t'{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n\t# Visualize detections for each class\n\t# im = im[:, :, (2, 1, 0)]\n\t# fig, ax = plt.subplots(figsize=(8, 8))\n\t# ax.imshow(im, aspect='equal')\n\n\tCONF_THRESH = 0.9\n\tNMS_THRESH = 0.3\n\tfor cls_ind, cls in enumerate(CLASSES[20:]):\n\t\tcls_ind += 20 # because we skipped everything except face\n\t\tcls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n\t\tcls_scores = scores[:, cls_ind]\n\t\tdets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)\n\t\tkeep = nms(dets, NMS_THRESH)\n\t\tdets = dets[keep, :]\n\t\teye = eyes[keep, :]\n\t\tsmile= smiles[keep, :]\n\n\tinds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n\tface_num = len(inds)\n\tprint '{} faces detected!'.format(face_num)\n\tdets = dets[inds, :]\n\teye = eye[inds, 1]\n\tsmile = smile[inds, 1]\n\n\treturn dets, eye, smile", "def classify_face(im):\n faces_death = get_encoded_faces_deaths()\n faces_arrested = get_encoded_faces_arrested()\n faces_wanted = get_encoded_faces_wanted()\n\n faces_encoded_death = list(faces_death.values())\n known_face_names_death = list(faces_death.keys())\n\n faces_encoded_arrested = list(faces_arrested.values())\n known_face_names_arrested = list(faces_arrested.keys())\n\n faces_encoded_wanted = list(faces_wanted.values())\n known_face_names_wanted = list(faces_wanted.keys())\n\n img = cv2.imread(im, 1)\n face_locations = face_recognition.face_locations(img)\n unknown_face_encodings = face_recognition.face_encodings(img,face_locations)\n face_names = []\n find_in_db(im,known_face_names_death,unknown_face_encodings,face_names,faces_encoded_death,\"unnatural_death_images/unnatural_death_images\")\n find_in_db(im,known_face_names_arrested,unknown_face_encodings,face_names,faces_encoded_arrested,\"ArrestPerson_images\")\n find_in_db(im,known_face_names_wanted,unknown_face_encodings,face_names,faces_encoded_wanted,\"wanted\")", "def __detect_face(self, img):\n gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)\n return self.detector(gray, 1)", "def detect_face(img:np.ndarray, bounds: tuple=None) -> np.ndarray:\n if not bounds:\n bounds = []\n bounds.append(np.array([0, 60, 80], dtype = \"uint8\"))\n bounds.append(np.array([20, 255, 255], dtype = \"uint8\"))\n\n converted = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n skinMask = cv2.inRange(converted, bounds[0], bounds[1])\n \n # apply a series of erosions and dilations to the mask\n # using an elliptical kernel\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))\n skinMask = cv2.erode(skinMask, kernel, iterations = 2)\n skinMask = cv2.dilate(skinMask, kernel, iterations = 2)\n \n # blur the mask to help remove noise, then apply the\n # mask to the frame\n skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)\n skin = cv2.bitwise_and(img, img, mask = skinMask)\n\n return skin", "def classify_face(im):\r\n faces = get_encoded_faces()\r\n faces_encoded = list(faces.values())\r\n known_face_names = list(faces.keys())\r\n\r\n img = cv2.imread(im, 1)\r\n \"\"\"\r\n Resize optinal \r\n \"\"\"\r\n #img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\r\n #img = 
img[:,:,::-1]\r\n face_locations = face_recognition.face_locations(img)\r\n unknown_face_encodings = face_recognition.face_encodings(img, face_locations)\r\n\r\n face_names = []\r\n for face_encoding in unknown_face_encodings:\r\n # See if the face is a match for the known face(s)\r\n matches = face_recognition.compare_faces(faces_encoded, face_encoding)\r\n name = \"Unknown\"\r\n\r\n # use the known face with the smallest distance to the new face\r\n face_distances = face_recognition.face_distance(faces_encoded, face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n name = known_face_names[best_match_index]\r\n\r\n face_names.append(name)\r\n\r\n \"\"\"\r\n All the photo lables in the faces foler end with (number) so a simiple .find(\"(\") command takes the () away from\r\n the label leaving us with the full name of the person\r\n\r\n \"\"\"\r\n\r\n result = name.find('(') \r\n fullname = (name[:result])\r\n \"\"\"\r\n If face_recogntion module recognizes a face but that face is not in the faces module then \r\n it will print unknown and we print 12345678 to use it on the start attednace program \r\n\r\n \"\"\"\r\n if (name == \"Unknown\"):\r\n print(\"12345678\")\r\n else:\r\n \"\"\"\r\n f'{len(face_locayion)}-people - will return the number of people in photo taken by Nao'\r\n \"\"\"\r\n print (f'{len(face_locations)}-people')\r\n print (fullname)\r\n print(courseid)\r\n print (lateornot)\r\n c34 = fullname.find(' ')\r\n firstname = (fullname[:c34])\r\n lastname = (fullname[c34:])\r\n \"\"\"\r\n We get all the data courseid , fristname , lastname, datetime1,and late or not and submited on the website \r\n \r\n\r\n \"\"\"\r\n login_data = {\r\n\t 'Course': courseid,\r\n\t 'FirstName': firstname,\r\n\t 'LastName': lastname,\r\n\t 'Date': datetime2,\r\n\t 'Attendance': 'on',\r\n\t 'Late': latev,\r\n\t 'submitbutton': 'Submit'\r\n }\r\n if(fullname == \"Unknow\"):\r\n \tprint(\"I-dont-know-you\")\r\n else:\r\n \r\n with requests.Session() as s:\r\n \turl = \"https://rbattendance.000webhostapp.com/update.php\"\r\n \tr = s.get(url)\r\n \tsoup = BeautifulSoup(r.content, 'html5lib')\r\n \tr = s.post(url, data = login_data)\r\n \t#print(r.content)\r\n \r\n \r\n\r\n\r\n\r\n\r\n \"\"\"\r\n This for loop is reponsible for drawing on the image \r\n \"\"\"\r\n\r\n for (top, right, bottom, left), name in zip(face_locations, face_names):\r\n # Draw a box around the face\r\n cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)\r\n\r\n # Draw a label with a name below the face\r\n cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)\r\n font = cv2.FONT_HERSHEY_DUPLEX\r\n cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)\r\n\r\n\r\n # Display the resulting image\r\n \r\n \r\n while True:\r\n #cv2.imshow('Video', img)\r\n #if cv2.waitKey(1) & 0xFF == ord('q'):\r\n return face_names", "def detect(self, frame, foreground_mask):\n pass", "def detector(videoframe, facedetection, maskdetection):\n (h, w) = videoframe.shape[:2]\n blobimage = cv2.dnn.blobFromImage(videoframe, 1.0, (224, 224), (104.0, 177.0, 123.0))\n\n facedetection.setInput(blobimage)\n ffinding = facedetection.forward()\n\n face_list = []\n locations = []\n predictions = []\n\n for i in range(0, ffinding.shape[2]):\n credence = ffinding[0, 0, i, 2]\n if credence > 0.6:\n case = ffinding[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x_start, y_start, x_end, y_end) = case.astype(\"int\")\n (x_start, y_start) = 
(max(0, x_start), max(0, y_start))\n (x_end, y_end) = (min(w - 1, x_end), min(h - 1, y_end))\n\n image = videoframe[y_start:y_end, x_start:x_end]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (224, 224))\n image = img_to_array(image)\n image = preprocess_input(image)\n face_list.append(image)\n locations.append((x_start, y_start, x_end, y_end))\n\n if len(face_list) > 0:\n face_list = np.array(face_list, dtype=\"float32\")\n predictions = maskdetection.predict(face_list, batch_size=32)\n return (locations, predictions)", "def detect(self, frame): \n return self.__detect_faces(frame)", "def im_detect(net, target_data,im_data, im_info, features_given=True):\n\n cls_prob, rois = net(target_data, im_data, im_info,\n features_given=features_given)\n scores = cls_prob.data.cpu().numpy()[0,:,:]\n zs = np.zeros((scores.size, 1))\n scores = np.concatenate((zs,scores),1)\n boxes = rois.data.cpu().numpy()[0,:, :]\n\n return scores, boxes", "def detect(self, mask):\n # 1) Return Non zero indices\n det_idx = np.where(mask > 0.0)\n idx_x, idx_y = det_idx[0], det_idx[1]\n # 2) Create 1x1 box for each pixel detected.\n detections = []\n for i in range(0, len(idx_x)):\n x, y = idx_x[i], idx_y[i]\n detections.append((x, y, x+1, y+1, 1)) # x1, y1, x2, y2, area\n # 3) merge boxes\n bounding_boxes = self.bounding_boxes(detections)\n return bounding_boxes", "def _detect_face_ResNet10_SSD(self, img):\n\n detector = self.detector\n (h, w) = img.shape[:2]\n # construct a blob from the image\n img_blob = cv2.dnn.blobFromImage(\n cv2.resize(img, (300, 300)),\n 1.0,\n (300, 300),\n (104.0, 177.0, 123.0),\n swapRB=False,\n crop=False,\n )\n\n detector.setInput(img_blob)\n detections = detector.forward()\n\n (start_x, start_y, end_x, end_y) = (0, 0, 0, 0)\n faces_bb = []\n if len(detections) > 0:\n # we're making the assumption that each image has only ONE\n # face, so find the bounding box with the largest probability\n for i in range(0, detections.shape[2]):\n\n score = detections[0, 0, i, 2]\n\n # ensure that the detection greater than our threshold is\n # selected\n if score > self.confidence:\n # compute the (x, y)-coordinates of the bounding box for\n # the face\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n box = box.astype(\"int\")\n (start_x, start_y, end_x, end_y) = box\n\n # print(\"start x : {}\".format(start_x))\n # print(\"start y : {}\".format(start_y))\n # print(\"end x : {}\".format(end_x))\n # print(\"end y : {}\".format(end_y))\n\n # extract the face ROI and grab the ROI dimensions\n face = img[start_y:end_y, start_x:end_x]\n\n (fh, fw) = face.shape[:2]\n # ensure the face width and height are sufficiently large\n if fw < 20 or fh < 20:\n pass\n else:\n faces_bb.append(box)\n\n if len(faces_bb) > 0:\n faces_bb = np.array(faces_bb)\n\n return faces_bb", "def detect_faces(self, img):\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=.7)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = detect_face.create_mtcnn(sess, None)\n\n minsize = 20 # minimum size of face\n threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold\n factor = 0.709 # scale factor\n\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n\n nrof_faces = bounding_boxes.shape[0]\n img_size = np.asarray(img.shape)[0:2]\n\n faces = []\n faces_rects = []\n\n for i in range(nrof_faces):\n det = bounding_boxes[i,0:4]\n bb 
= np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0]-5/2, 0)\n bb[1] = np.maximum(det[1]-5/2, 0)\n bb[2] = np.minimum(det[2]+5/2, img_size[1])\n bb[3] = np.minimum(det[3]+5/2, img_size[0])\n faces.append(img[bb[1]:bb[3], bb[0]:bb[2], :])\n faces_rects.append({'name': 'none', 'x': bb[0], 'y': bb[1], 'w': bb[2]-bb[0], 'h': bb[3]-bb[1]})\n\n return [img, faces, faces_rects]", "def detect_face(self, img):\n # Fetch face location from the frame with 128 encoding of face landmarks\n curr_face_loc, name_list, info_list = load_encode_loc(img, self.kwn_names,\n self.kwn_encoding,\n self.status_list, self.since_list)\n print('Current value is ', curr_face_loc, name_list)\n face_list = []\n face_area = []\n print('face loc', curr_face_loc)\n if len(curr_face_loc):\n\n for (top, right, bottom, left), name in zip(curr_face_loc, name_list):\n print(top, right, bottom, left)\n cv2.rectangle(img, (top, right), (bottom, left), (0, 255, 2), 2)\n\n w = right - left\n h = bottom - top\n cx = left + w // 2\n cy = top + h // 2\n area = w * h\n\n for idx, info in enumerate(info_list):\n cv2.putText(img, info, (bottom, int(left * idx * 0.2)),\n cv2.FONT_HERSHEY_COMPLEX, 1,\n (0, 0, 255), 1)\n\n face_list.append([cx, cy])\n face_area.append(area)\n\n i = face_area.index(max(face_area))\n\n return img, [face_list[i], face_area[i]]\n\n else:\n return img, [[0, 0], 0]", "def detect_face(gray):\r\n face_cascade = cv2.CascadeClassifier(classifier_file_name)\r\n faces = face_cascade.detectMultiScale(gray, scaleFactor=scale_factor,minNeighbors=min_neighbors,minSize=min_size,flags=flags)\r\n return faces", "def detect_face(self, img):\n #convert the test image to gray image as opencv face detector expects gray images\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n #let's detect multiscale (some images may be closer to camera than others) images\n #result is a list of faces\n faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5);\n\n #if no faces are detected then return None\n if (len(faces) == 0):\n return None, None\n\n #under the assumption that there will be only one face,\n #extract the face area\n (x, y, w, h) = faces[0]\n\n #return only the face part of the image\n return gray[y:y+w, x:x+h], faces[0]", "def is_existing_face(image, trackers, face):\n\n x1, y1, w1, h1 = face\n face_mask = np.zeros_like(image)\n face_mask[y1:y1+h1, x1:x1+w1] = 1\n for t in trackers:\n try:\n x,y,w,h = t.bounding_box\n t_mask = np.zeros_like(image)\n t_mask[y:y+h, x:x+w] = 1\n\n union = np.sum(np.bitwise_or(face_mask, t_mask))\n intersection = np.bitwise_and(face_mask, t_mask)\n if float(np.sum(intersection))/union > 0.3 or float(np.sum(intersection))/np.sum(t_mask+1) > 0.7:\n return (t, True)\n except Exception:\n pass\n \n return (None, False)", "def get_classification(self, image):\n\n temp = cv2.cvtColor(cv2.GaussianBlur(image,(5,5),0), cv2.COLOR_BGR2HSV)\n\n maskR = cv2.inRange(temp, np.array([0, 195, 240]), np.array([5, 215, 255]))\n maskY = cv2.inRange(temp, np.array([28, 195, 240]), np.array([35, 215, 255]))\n maskG = cv2.inRange(temp, np.array([60, 195, 240]), np.array([67, 215, 255]))\n\n filt_r = cv2.bitwise_and(temp,temp, mask= maskR)\n filt_y = cv2.bitwise_and(temp,temp, mask= maskY)\n filt_g = cv2.bitwise_and(temp,temp, mask= maskG)\n\n # Bitwise-AND mask and original image\n self.debug_im1 = filt_r\n self.debug_im2 = filt_y\n self.debug_im3 = filt_g\n status = TrafficLight.UNKNOWN\n\n if np.sum(maskR>10):\n print('detected red')\n status = TrafficLight.RED\n elif np.sum(maskY>10):\n 
print('detected yellow')\n status = TrafficLight.YELLOW\n elif np.sum(maskG>10):\n print('detected green')\n status = TrafficLight.GREEN\n\n # self.debug()\n return status", "def brain_has_lead_image(self, brain=None):", "def face_detector( img_path, face_cascade):\n img = cv2.imread( img_path )\n gray = cv2.cvtColor( img, cv2.COLOR_BGR2GRAY )\n faces = face_cascade.detectMultiScale( gray )\n if len( faces ) > 0:\n return True\n else:\n return False", "def im_detect(net, im, boxes):\n blobs, unused_im_scale_factors = _get_blobs(im, boxes)\n # When mapping from image ROIs to feature map ROIs, there's some aliasing\n # (some distinct image ROIs get mapped to the same feature ROI).\n # Here, we identify duplicate feature ROIs, so we only compute features\n # on the unique subset.\n for i in range(len(blobs['data'])):\n if cfg.DEDUP_BOXES > 0:\n v = np.array([1, 1e3, 1e6, 1e9, 1e12])\n hashes = np.round(blobs['rois'][i] * cfg.DEDUP_BOXES).dot(v)\n _, index, inv_index = np.unique(hashes, return_index=True,\n return_inverse=True)\n blobs['rois'][i] = blobs['rois'][i][index, :]\n boxes_tmp = boxes[index, :].copy()\n else:\n boxes_tmp = boxes.copy()\n t_data = blobs['data'][i].astype(np.float32, copy=False)\n #t_data = t_data.reshape((1, t_data.shape[0], t_data.shape[1], t_data.shape[2], t_data.shape[3]))\n data_height, data_width = t_data.shape[1], t_data.shape[2]\n im_data = torch.FloatTensor(t_data).cuda()\n im_data = im_data.permute(0, 3, 1, 2).contiguous() #.view(3, data_height, data_width)\n LIM = 2000 # split ROIs due to memory issue\n if cfg.TEST.USE_FLIPPED :\n blobs['data'][i] = blobs['data'][i][:, :, ::-1, :]\n width = blobs['data'][i].shape[2]\n t_data = blobs['data'][i].astype(np.float32, copy=False)\n data_height, data_width = t_data.shape[1], t_data.shape[2]\n #im_data = torch.FloatTensor(t_data).cuda()\n im_data_flip = torch.from_numpy(t_data.copy()).cuda()\n im_data_flip = im_data_flip.permute(0, 3, 1, 2).contiguous()#.view(3, data_height, data_width)\n #im_data = im_data[...,::-1]\n for j in range (int(np.ceil(blobs['rois'][i].shape[0] / LIM))) :\n t_rois = blobs['rois'][i][j*LIM:(j+1)*LIM].astype(np.float32, copy=False)\n im_rois = torch.FloatTensor(t_rois).cuda()\n ic_prob, ic_prob1, ic_prob2 = net(im_data, im_rois)\n scores_tmp = ic_prob + ic_prob1 + ic_prob2\n pred_boxes_small = np.tile(boxes_tmp[j*LIM : (j+1)*LIM], (1, scores_tmp.shape[2]))\n\n if cfg.TEST.USE_FLIPPED:\n #pdb.set_trace()\n oldx1 = blobs['rois'][i][j*LIM:(j+1)*LIM, 1].copy()\n oldx2 = blobs['rois'][i][j*LIM:(j+1)*LIM, 3].copy()\n blobs['rois'][i][j*LIM:(j+1)*LIM, 1] = width - oldx2 - 1\n blobs['rois'][i][j*LIM:(j+1)*LIM, 3] = width - oldx1 - 1\n assert (blobs['rois'][i][j*LIM:(j+1)*LIM, 3] >= blobs['rois'][i][j*LIM:(j+1)*LIM, 1]).all()\n t_rois = blobs['rois'][i][j*LIM:(j+1)*LIM].astype(np.float32, copy=False)\n im_rois = torch.FloatTensor(t_rois).cuda()\n ic_prob, ic_prob1, ic_prob2 = net(im_data_flip, im_rois)\n scores_tmp += ic_prob + ic_prob1 + ic_prob2\n del im_rois\n\n if j is 0 :\n scores_tmp_real = scores_tmp\n pred_boxes = pred_boxes_small\n else :\n scores_tmp_real = torch.cat((scores_tmp_real, scores_tmp), dim=1)\n pred_boxes = np.vstack((pred_boxes, pred_boxes_small))\n\n\n if cfg.DEDUP_BOXES > 0:\n # Map scores and predictions back to the original set of boxes\n scores_tmp = scores_tmp_real[:,inv_index, :]\n pred_boxes = pred_boxes[inv_index, :]\n \n if i == 0: \n scores = np.copy(scores_tmp.data).squeeze()\n if len(scores.shape) == 1 :\n scores = scores[np.newaxis, :]\n else:\n scores += 
scores_tmp[0].data\n\n scores /= len(blobs['data']) * (1. + cfg.TEST.USE_FLIPPED)\n return scores[:,1:], pred_boxes[:, 4:]", "def detect_onh(stack):\n profile = np.mean(stack, axis=(0,2))\n max_frame_ind = np.where(profile==np.max(profile))[0][0]\n enface = np.mean(stack[:,0:max_frame_ind+100,:], axis=1)\n ref = gaussian(enface, sigma=2)\n thresh_val = threshold_isodata(ref)\n #I don't think remove small objects should ever be a problem, the default is <64 px\n #This is to prevent a small object from tricking the program into thinking it's the onh when the onh is touching the sides.\n classified_img = morph.remove_small_objects(ref<thresh_val, min_size=200)\n #io.imsave(\"check_raw.tif\", (classified_img.astype('uint8'))*256)\n labels = label(classified_img, connectivity=2)\n props = regionprops(labels)\n onh_ind = max_area(props)\n if onh_ind == -1:\n binary_enface = 1-morph.convex_hull_image(morph.remove_small_objects(morph.binary_closing(classified_img!=True)))\n #io.imsave('binary.tif',(binary_enface.astype('uint8'))*256)\n classified_img = nd.filters.maximum_filter(nd.filters.minimum_filter(binary_enface - classified_img, size=(4,6)),size=(4,6))\n #io.imsave(\"check.tif\", (classified_img.astype('uint8'))*256)\n labels = label(classified_img, connectivity=2)\n props = regionprops(labels)\n onh_ind = max_area(props)\n\n try:\n assert onh_ind!=-1\n except AssertionError:\n return -1\n else:\n return props[onh_ind]", "def isFusion(event,buff):\n index,diff,label = event\n label = label[0]\n if diff>0:\n return False,[]\n img_before = np.copy(buff[:,:,index-1])\n img_after = np.copy(buff[:,:,index])\n mask_before = (img_before==label).astype(np.uint8)\n nb_elts_before = np.amax(img_before)\n kernel = np.ones((7,7),np.uint8)\n neighbouring_mask = cv2.dilate(mask_before,kernel,iterations=8)\n\n new_map = np.multiply(img_before,neighbouring_mask.astype(np.uint8))\n \n #Removing the element we are currently looking at\n new_map[img_before==label]=0\n possible_candidates = []\n for i in range(nb_elts_before):\n if np.any(new_map==i+1):\n possible_candidates.append(i+1)\n #Computes the area of the cells and compares them\n size_cell_disappearing = np.count_nonzero(img_before==label)\n match = [] #lists the ratios sizeAfter/sizeBefore for possible matches\n \n for vals in possible_candidates:\n size_other_cell = np.count_nonzero(img_before==vals)\n size_before = size_other_cell+size_cell_disappearing\n size_after = np.count_nonzero(img_after==vals)\n ratio = float(size_after)/float(size_before)\n if ratio>0.8 and ratio<1.2:\n match.append((vals,abs(1-ratio)))\n if len(match)==0:\n return False,[]\n if len(match)>1:\n #Several matches, so pick the best\n values = [y for x,y in match]\n result_label,osef = match[np.argmin(values)]\n else:\n result_label, osef = match[0]\n return True,result_label", "def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()", "def demo(net, image_name,num_class,save_ff):\r\n\r\n # Load the demo image\r\n #im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\r\n im_file=image_name\r\n im = cv2.imread(im_file)\r\n\r\n # Detect all object classes and regress object bounds\r\n timer = Timer()\r\n timer.tic()\r\n #for zzz in range(100):\r\n scores, boxes = im_detect(net, im)\r\n timer.toc()\r\n print ('Detection took {:.3f}s for '\r\n '{:d} object proposals').format(timer.total_time, boxes.shape[0])\r\n\r\n # Visualize detections for each class\r\n CONF_THRESH = 0.35\r\n NMS_THRESH = 0.3\r\n 
thresh=CONF_THRESH\r\n for cls_ind, cls in enumerate(range(num_class)):#CLASSES[1:]\r\n cls_ind += 1 # because we skipped background\r\n # cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\r\n # cls_scores = scores[:, cls_ind]\r\n # dets = np.hstack((cls_boxes,\r\n # cls_scores[:, np.newaxis])).astype(np.float32)\r\n inds = np.where(scores[:, cls_ind] > thresh)[0]\r\n cls_scores = scores[inds, cls_ind]\r\n if cfg.TEST.AGNOSTIC:\r\n cls_boxes = boxes[inds, 4:8]\r\n else:\r\n cls_boxes = boxes[inds, cls_ind*4:(cls_ind+1)*4]\r\n dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\r\n .astype(np.float32, copy=False)\r\n keep = nms(dets, NMS_THRESH)\r\n dets = dets[keep, :]\r\n #vis_detections(im, cls, dets, thresh=CONF_THRESH)\r\n inds = np.where(dets[:, -1] >= thresh)[0]\r\n if len(inds) == 0:\r\n continue\r\n\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n for i in inds:\r\n bbox = dets[i, :4]\r\n score = dets[i, -1]\r\n print bbox,score,cls\r\n cv2.rectangle(im_tmp, (bbox[0],bbox[1]), (bbox[2],bbox[3]), (0,0,255),2)\r\n #save_ff=\"/storage2/liushuai/faster_rcnn/FasterRCNN-Encapsulation-Cplusplus/faster_cxx_lib_ev2641/test_result.jpg\"\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n cv2.imwrite(save_ff,im_tmp)\r\n #save_pic(im, cls, dets, thresh=CONF_THRESH,save_ff)\r", "def paintings_detection(query_image, mask):\n\n image = cv2.imread(query_image)\n\n image_width = mask.shape[0]\n image_height = mask.shape[1]\n x_box_1, y_box_1, w_box_1, h_box_1, x_box_2, y_box_2, w_box_2, h_box_2 = 0, 0, 0, 0, 0, 0, 0, 0, \n\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n \n if (w > 0.15 * image_width) & (h > 0.15 * image_height) & (w < 0.98 * image_width) & (x_box_1 == 0):\n x_box_1, y_box_1, w_box_1, h_box_1 = x, y, w, h\n elif (w > 0.15 * image_width) & (h > 0.15 * image_height) & (w < 0.98 * image_width) & (x_box_1 != 0):\n x_box_2, y_box_2, w_box_2, h_box_2 = x, y, w, h\n\n if x_box_2 == 0:\n x_value_to_split = 0\n else:\n x_value_to_split = (x_box_1 + w_box_1/2 + x_box_2 + w_box_2/2) / 2\n\n\n return(x_value_to_split)", "def detect(gray,frame):\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n for (x,y,w,h) in faces:\n # Drawing the rectangle --> frame, top-left co-ordinate, bottom-right co-ordinate, Color, Thickness\n cv2.rectangle(frame, (x,y), (x+w, y+h), (255,0,0), 2)\n roi_gray = gray[y:y+h,x:x+w]\n roi_color = frame[y:y+h,x:x+w]\n # Detecting the eyes ---> using roi_gray just to reduce computation\n eyes = eyes_cascade.detectMultiScale(roi_gray, 1.1 ,3)\n for (ex,ey,ew,eh) in eyes:\n cv2.rectangle(roi_color, (ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n return frame", "def detect_face_task(img):\n\n # paramter for detect\n # image_size = 160\n # margin = 44\n minsize = 20 # minimum size of face\n threshold = [0.6, 0.7, 0.7] # three steps's threshold\n factor = 0.709 # scale factor\n\n # caffe model\n pnet = caffe_model.get_pnet()\n rnet = caffe_model.get_rnet()\n onet = caffe_model.get_onet()\n\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n print('detect bounding: ', bounding_boxes)\n print('Find faces: ', bounding_boxes.shape[0])\n\n # all_faces is faces information list, include face bytes, face position\n all_faces = []\n for face_position in bounding_boxes:\n face_position = face_position.astype(int)\n print('face position: ', face_position)\n\n # each face information, include position, face image\n head_rect = face_position[:4].tolist() # numpy 
array to python list\n head_img = misc.toimage(img).crop(head_rect)\n head_img_io = StringIO.StringIO()\n head_img.save(head_img_io, format='JPEG')\n head_img_b64 = base64.b64encode(head_img_io.getvalue())\n\n # construct response\n face_info = {}\n face_info['rect'] = head_rect\n face_info['image'] = head_img_b64\n\n all_faces.append(face_info)\n\n return all_faces", "def im_detect_mask(model, im_scales, boxes):\n assert len(im_scales) == 1, \\\n 'Only single-image / single-scale batch implemented'\n\n M_HEIGHT = cfg.MRCNN.RESOLUTION_H\n M_WIDTH = cfg.MRCNN.RESOLUTION_W\n if boxes.shape[0] == 0:\n pred_masks = np.zeros((0, M, M), np.float32)\n return pred_masks\n\n inputs = {'mask_rois': _get_rois_blob(boxes, im_scales)}\n # Add multi-level rois for FPN\n if cfg.FPN.MULTILEVEL_ROIS:\n _add_multilevel_rois_for_test(inputs, 'mask_rois')\n\n for k, v in inputs.items():\n workspace.FeedBlob(core.ScopedName(k), v)\n workspace.RunNet(model.mask_net.Proto().name)\n\n # Fetch masks\n pred_global_masks = workspace.FetchBlob(\n core.ScopedName('mask_fcn_global_probs')\n ).squeeze()\n pred_char_masks = workspace.FetchBlob(\n core.ScopedName('mask_fcn_char_probs')\n ).squeeze()\n # pred_char_boxes = workspace.FetchBlob(\n # core.ScopedName('mask_fcn_charbox_pred')\n # ).squeeze()\n pred_global_masks = pred_global_masks.reshape([-1, 1, M_HEIGHT, M_WIDTH])\n pred_char_masks = pred_char_masks.reshape([-1, M_HEIGHT, M_WIDTH, 37])\n pred_char_masks = pred_char_masks.transpose([0,3,1,2])\n # pred_char_boxes = pred_char_boxes.reshape([-1, 4, M_HEIGHT, M_WIDTH])\n\n return pred_global_masks, pred_char_masks, None", "def findFaces(self):\n\t\trects = self.detectAll()\n\t\tif len(rects)==0:\n\t\t\trects = []\n\t\telse:\n\t\t\trects[:, 2:] += rects[:, :2]\n\t\tself.analyzeFrame(rects)", "def faceRecognition(image):\r\n faceLandmarks = [[],[],[]]\r\n face_landmarks_list = face_recognition.face_landmarks(image)\r\n if len(face_landmarks_list)>0:\r\n if len(face_landmarks_list[0]['left_eye'])>0:\r\n leftEyePos = [tuple(map(lambda i: int(i/32),i)) for i in face_landmarks_list[0]['left_eye']]\r\n for i in set(leftEyePos):\r\n if leftEyePos.count(i)>=len(leftEyePos)//len(set(leftEyePos)):\r\n faceLandmarks[0] += [i,]\r\n if len(face_landmarks_list[0]['right_eye'])>0:\r\n rightEyePos = [tuple(map(lambda i: int(i/32),i)) for i in face_landmarks_list[0]['right_eye']]\r\n for i in set(rightEyePos):\r\n if rightEyePos.count(i)>=len(rightEyePos)//len(set(rightEyePos)):\r\n faceLandmarks[1] += [i,]\r\n if len(face_landmarks_list[0]['top_lip'])>0:\r\n mouthPos = [tuple(map(lambda i: int(i/32),i)) for i in (face_landmarks_list[0]['top_lip']+face_landmarks_list[0]['bottom_lip'])]\r\n for i in set(mouthPos):\r\n if mouthPos.count(i)>=len(mouthPos)//len(set(mouthPos)):\r\n faceLandmarks[2] += [i,]\r\n return faceLandmarks" ]
[ "0.7271276", "0.72283274", "0.70361453", "0.69948393", "0.6987588", "0.68838495", "0.68149173", "0.6666867", "0.6590072", "0.6567864", "0.6539547", "0.64649886", "0.64562804", "0.64490664", "0.6446386", "0.64457786", "0.6437423", "0.6422928", "0.6421587", "0.64157873", "0.64122385", "0.63993335", "0.63741463", "0.636948", "0.6365514", "0.6364125", "0.63501775", "0.63312066", "0.6326854", "0.6319483" ]
0.73045164
0
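
The retrieved document for the face-mask query above leans on OpenCV's DNN module: it builds a 300x300 blob, feeds it to an SSD face detector, and keeps only detections whose confidence clears the threshold before classifying each face crop. A minimal stand-alone sketch of just that detection pattern follows; it is an illustration under assumptions, not the dataset's own code — the prototxt/caffemodel paths are placeholders, and the mask-classification step from the document is omitted.

# Sketch of the OpenCV SSD face-detection pattern used by the document above.
# The model file names below are placeholder assumptions.
import cv2
import numpy as np

def detect_faces(image, proto="deploy.prototxt",
                 model="res10_300x300_ssd_iter_140000.caffemodel", threshold=0.5):
    net = cv2.dnn.readNetFromCaffe(proto, model)
    (h, w) = image.shape[:2]
    # 300x300 input with the usual BGR mean subtraction for this detector.
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))
    net.setInput(blob)
    detections = net.forward()  # shape (1, 1, N, 7): index 2 is confidence, 3:7 the box
    boxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > threshold:
            # Coordinates come back normalized to [0, 1]; scale them to pixels.
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            boxes.append(box.astype("int"))
    return boxes
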
Set up a lexical analyzer for `code` in `language`.
def __init__(self, code, language, tokennames='short'): self.code = code self.language = language self.tokennames = tokennames self.lexer = None # get lexical analyzer for `language`: if language in ('', 'text') or tokennames == 'none': return if not with_pygments: raise LexerError('Cannot analyze code. ' 'Pygments package not found.') try: self.lexer = get_lexer_by_name(self.language) except pygments.util.ClassNotFound: raise LexerError('Cannot analyze code. ' 'No Pygments lexer found for "%s".' % language)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\r\n\tlang = get_arguments()\r\n\twiki_analyzer(lang)", "def evaluateCode(lang, code):", "def __init__(\n self, language=Language.ENGLISHCASED, cache_dir=\".\"\n ):\n self.tokenizer = XLNetTokenizer.from_pretrained(language.value, cache_dir=cache_dir)\n self.language = language", "def __init__(self, language):\n self.stemmer = Stemmer(language)\n self.stopwords = stopwords.words(language)", "def init():\n # analyzer es utilizado para interactuar con el modelo\n analyzer = model.newAnalyzer()\n return analyzer", "def set_source(text):\n lexer.input(text)\n lexer.lineno = 1\n lexer.provenance = None", "def _create_spacy_tokenizer(self, language: AnyStr) -> Language:\n start = perf_counter()\n logging.info(f\"Loading tokenizer for language '{language}'...\")\n try:\n if language == \"th\": # PyThaiNLP requires a \"data directory\" even if nothing needs to be downloaded\n os.environ[\"PYTHAINLP_DATA_DIR\"] = mkdtemp() # dummy temp directory\n if language in SPACY_LANGUAGE_MODELS and self.use_models:\n nlp = spacy.load(SPACY_LANGUAGE_MODELS[language])\n else:\n nlp = spacy.blank(language) # spaCy language without models (https://spacy.io/usage/models)\n except (ValueError, OSError) as e:\n raise TokenizationError(\n f\"SpaCy tokenization not available for language '{language}' because of error: '{e}'\"\n )\n if self.hashtags_as_token:\n re_token_match = spacy.tokenizer._get_regex_pattern(nlp.Defaults.token_match)\n re_token_match = r\"\"\"({re_token_match}|#\\w+)\"\"\"\n nlp.tokenizer.token_match = re.compile(re_token_match).match\n _prefixes = list(nlp.Defaults.prefixes)\n if \"#\" in _prefixes:\n _prefixes.remove(\"#\")\n nlp.tokenizer.prefix_search = spacy.util.compile_prefix_regex(_prefixes).search\n if self.stopwords_folder_path and language in SUPPORTED_LANGUAGES_SPACY:\n self._customize_stopwords(nlp, language)\n logging.info(f\"Loading tokenizer for language '{language}': done in {perf_counter() - start:.2f} seconds\")\n if language not in UNSUPPORTED_SPACY_EMOJI_LANG:\n nlp.add_pipe(Emoji(nlp), first=True)\n return nlp", "def initAnalyzer():\n return controller.initAnalyzer()", "def make_lexical_analysis (file_name_input: str, file_name_output: str) -> str:\n\n #open and read lines of input file (p-- code)\n file_input = open(file_name_input, 'r')\n lines = file_input.readlines()\n\n is_comment = False\n has_2_characters = False\n counter_lines = 1\n counter_errors = 0\n counter_characters = 0\n table_tokens = []\n lexical_message = \"\"\n lexical_error = \"\"\n final_word = \"\"\n\n\t#get each line\n for line in lines:\n\n #get words in a line\n words_in_line = line.strip().split(' ')\n\n #get sentences for words in a line\n for sentence in words_in_line:\n\n counter_characters = 0\n\n #get character in a sentence\n for character in sentence:\n\t #current character is a puctuation word?\n if (character in '=,;()+-/*' and is_comment==False and has_2_characters==False) or \\\n (character in ':<>' and is_comment==False and has_2_characters==False) or \\\n (is_comment==False and has_2_characters==True):\n\n lexical_message, counter_errors, has_2_characters, final_word, table_tokens, counter_characters = check_one_or_two_characters (character, final_word, sentence, counter_characters, counter_lines,\n counter_errors,is_comment, has_2_characters, lexical_message, table_tokens)\n\n\t #current final_word is 'end' and current character is '.' (i.e. 
end of p-- code)?\n elif (is_comment==False) and (final_word == 'end' and character == '.'):\n lexical_message, counter_errors, final_word, table_tokens = check_end_program (character, final_word, counter_errors, counter_lines, lexical_message, table_tokens)\n\n\t #check if the comment starts or ends\n elif ((is_comment == False) and (character == '{')) or ((is_comment == True) and (character == '}')):\n is_comment, line_comment = check_comments (character, is_comment, counter_lines)\n\n\t #final word is probably an identifier, a reserved word, an integer number or a real number!!!\n elif is_comment == False:\n final_word = final_word + character\n\n if final_word != \"\":\n #recognize the final_word and added to table\n lexical_message, counter_errors = add_final_word_to_table (table_tokens, lexical_message, final_word, counter_lines, counter_errors)\n final_word = \"\"\n\n\t #analyse next line\n counter_lines = counter_lines + 1\n\n\t#the comment was opened but not closed?\n if is_comment == True:\n lexical_message = lexical_message + final_word + ',' + get_message_lexical_error (4, counter_lines-1)\n counter_errors += 1\n\n\t#open output file and write lexical message\n \n file_output = open(file_name_output, 'w')\n file_output.write(lexical_errors)\n \n\n file_output.close()\n file_input.close()\n return table_tokens", "def set_analyzer(self, analyzer):\n return self.set_param(\"analyzer\", analyzer)", "def addLang(self, lang, index_dir=None):\n idx_dir = self.get_index_name(lang, index_dir=index_dir)\n self.idx_dir[lang] = idx_dir\n if not os.path.exists(idx_dir):\n raise ValueError(f\"No index in {idx_dir}!\")\n directory = SimpleFSDirectory(Paths.get(idx_dir))\n self.searcher[lang] = IndexSearcher(DirectoryReader.open(directory))\n self.analyzer[lang] = analyzers[lang]()\n self.parser_context[lang] = QueryParser(\"context\", self.analyzer[lang])\n self.parser_title[lang] = QueryParser(\"title\", self.analyzer[lang])\n self.parser_multi[lang] = MultiFieldQueryParser([\"title\", \"context\"], self.analyzer[lang])\n self.parser_multi[lang].setDefaultOperator(QueryParser.Operator.OR)\n self.searcher[lang].setSimilarity(self.similarity)", "def init(lang):\n pass", "def __init__(self, source: str, on_error=None):\n self.source = source\n self.on_error = on_error\n self.tokens = []", "def setup(app):\r\n\r\n # This is only a lexer, so adding it below to pygments appears sufficient.\r\n # But if somebody knows that the right API usage should be to do that via\r\n # sphinx, by all means fix it here. 
At least having this setup.py\r\n # suppresses the sphinx warning we'd get without it.\r\n pass", "def ConfigureLexer(self, file_ext):\n syn_data = self._code['synmgr'].GetSyntaxData(file_ext)\n\n # Set the ID of the selected lexer\n self._code['lang_id'] = syn_data.LangId\n\n lexer = syn_data.Lexer\n # Check for special cases\n # TODO: add fetch method to check if container lexer requires extra\n # style bytes beyond the default 5.\n if lexer in [ wx.stc.STC_LEX_HTML, wx.stc.STC_LEX_XML]:\n self.SetStyleBits(7)\n elif lexer == wx.stc.STC_LEX_NULL:\n self.SetStyleBits(5)\n self.SetLexer(lexer)\n self.ClearDocumentStyle()\n self.UpdateBaseStyles()\n return True\n else:\n self.SetStyleBits(5)\n\n # Set Lexer\n self.SetLexer(lexer)\n # Set Keywords\n self.SetKeyWords(syn_data.Keywords)\n # Set Lexer/Syntax Specifications\n self.SetSyntax(syn_data.SyntaxSpec)\n # Set Extra Properties\n self.SetProperties(syn_data.Properties)\n # Set Comment Pattern\n self._code['comment'] = syn_data.CommentPattern\n\n # Get Extension Features\n clexer = syn_data.GetFeature(synglob.FEATURE_STYLETEXT)\n indenter = syn_data.GetFeature(synglob.FEATURE_AUTOINDENT)\n\n # Set the Container Lexer Method\n self._code['clexer'] = clexer\n # Auto-indenter function\n self._code['indenter'] = indenter", "def parse_analyzer(config: Config) -> Analyzer:\n config = config.parse(PARSER)\n cls: t.Type[Analyzer] = config['class'].value\n analyzer: Analyzer = cls()\n analyzer.configure(config.get('config', {}))\n return analyzer", "def syntax_highlight(lang, code):\n\n highlighted = None\n\n try:\n if lang.lower() == 'python':\n highlighted = highlight(code, PythonLexer(), HtmlFormatter())\n\n elif lang.lower() == 'shell':\n highlighted = highlight(code, BashLexer(), HtmlFormatter())\n\n elif lang.lower() == 'asp':\n highlighted = highlight(code, CSharpAspxLexer(), HtmlFormatter())\n\n elif lang.lower() == 'csharp':\n highlighted = highlight(code, CSharpLexer(), HtmlFormatter())\n\n elif lang.lower() == 'ruby':\n highlighted = highlight(code, RubyLexer(), HtmlFormatter())\n\n elif lang.lower() == 'json':\n highlighted = highlight(code, JsonLexer(), HtmlFormatter())\n\n elif lang.lower() == 'js':\n highlighted = highlight(code, JavascriptLexer(), HtmlFormatter())\n\n elif lang.lower() == 'objective-c':\n highlighted = highlight(code, ObjectiveCLexer(), HtmlFormatter())\n\n elif lang.lower() == 'java':\n highlighted = highlight(code, JavaLexer(), HtmlFormatter())\n\n splitted = highlighted.split('\"highlight')\n highlighted = splitted[0] + '\"highlight '+lang + splitted[1]\n\n highlighted = highlighted.replace(\"<pre>\", \"\")\n highlighted = highlighted.replace(\"</pre>\", \"\")\n highlighted = highlighted.replace(\"div\", \"pre\")\n\n return highlighted\n except Exception as e:\n raise e", "def test_unknown_language(self):\n lexer = syntax_highlighting.fetch_lexer('', 'lkjasdlkjsad')\n self.assertIsNotNone(lexer)", "def text_analyzer(*text):\n if len(text) > 1:\n print(\"ERROR\")\n return\n if len(text) == 0 or isinstance(text[0], str) == 0:\n text = []\n text.append(input(\"What is the text to analyse?\\n>> \"))\n ponctu_list = string.punctuation\n nb_upper = 0\n nb_lower = 0\n nb_ponct = 0\n nb_spaces = 0\n letters = 0\n for char in text[0]:\n letters += 1\n if char == ' ':\n nb_spaces += 1\n elif char.isupper():\n nb_upper += 1\n elif char.islower():\n nb_lower += 1\n elif char in ponctu_list:\n nb_ponct += 1\n print(\"The text contains {} characters:\" .format(letters), '\\n')\n print(\"-\", nb_upper, \"upper letters\\n\")\n 
print(\"-\", nb_lower, \"lower letters\\n\")\n print(\"-\", nb_ponct, \"punctuation marks\\n\")\n print(\"-\", nb_spaces, \"spaces\")", "def language(self, text_language):\n language = text_language.strip().lower()\n if language in LANGUAGE_TO_CODE:\n self._language_code = LANGUAGE_TO_CODE[language]\n else:\n self._language_code = language[:2]", "def __init__(self, language: str):\n self.input_files = [\"../morphology/\" + language + '/' + language + \".clean.train.conll\",\n \"../morphology/\" + language + '/' + language + \".clean.dev.conll\",\n \"../morphology/\" + language + '/' + language + \".clean.test.conll\"]\n self.language = language", "def __init__ (self, languageFilename):\n if not isinstance(languageFilename, str): # Checks if the filename is entered as a string.\n raise TypeError('The filename must be a string')\n self._words = set()\n try:\n with open(languageFilename) as data:\n line = data.readline()\n while line:\n line = line.rstrip()\n self._words.add(line)\n line = data.readline()\n except IOError:\n print('Please specify the correct name for the dictionary')", "def test_analyzer():\n import analyzer\n\n analyzer # Fake usage.", "def _build_analyzer():\n analyzer = {}\n for e in LOSSLESS:\n analyzer[e] = lambda d, _ext, name: d.lossless.append(name)\n for e in COMPRESSED:\n analyzer[e] = lambda d, _ext, name: d.compressed.append(name)\n for e in IMAGES:\n analyzer[e] = lambda d, _ext, name: d.images.append(name)\n for e in VIDEOS:\n analyzer[e] = lambda d, _ext, name: d.videos.append(name)\n\n def _increment_ignored(d, _ext, _name):\n d.ignored += 1 # Can't use assignment in lambda\n\n for e in IGNORE:\n analyzer[e] = _increment_ignored\n analyzer['cue'] = lambda d, _, name: d.cue.append(name)\n\n return analyzer", "def _init_tokenizer(self, save_dir, config, src_lang, tgt_lang):\n tokenizer_args = {\n 'do_lower_case': False,\n 'do_basic_tokenize': False,\n 'cache_dir': self._cache,\n 'use_fast': self._use_fast(),\n 'src_lang': src_lang,\n 'tgt_lang': tgt_lang,\n }\n if save_dir is not None:\n tokenizer_args.update({'pretrained_model_name_or_path': save_dir, 'config': config})\n else:\n tokenizer_args.update({'pretrained_model_name_or_path': self._pretrained_name})\n\n self._tokenizer = AutoTokenizer.from_pretrained(**tokenizer_args)\n\n # We only include the base tokenizers since `isinstance` checks for inheritance\n if isinstance(self._tokenizer, (BertTokenizer, BertTokenizerFast)):\n self._tokenizer.is_piece_fn = lambda wp: wp.startswith('##')\n elif isinstance(\n self._tokenizer,\n (\n XLMRobertaTokenizer,\n XLMRobertaTokenizerFast,\n T5Tokenizer,\n T5TokenizerFast,\n BartTokenizer,\n BartTokenizerFast,\n MBartTokenizer,\n MBartTokenizerFast,\n MBart50Tokenizer,\n MBart50TokenizerFast,\n MarianTokenizer,\n M2M100Tokenizer,\n XGLMTokenizer,\n XGLMTokenizerFast,\n NllbTokenizer,\n NllbTokenizerFast,\n ),\n ):\n self._tokenizer.is_piece_fn = lambda wp: not wp.startswith(SPIECE_UNDERLINE)\n elif isinstance(self._tokenizer, (GPT2Tokenizer, GPT2TokenizerFast)):\n self._tokenizer.is_piece_fn = lambda wp: not wp.startswith('Ġ')\n elif isinstance(self._tokenizer, ByT5Tokenizer):\n self._tokenizer.is_piece_fn = lambda wp: False\n\n # make sure we assigned is_piece_fn\n assert self._tokenizer.is_piece_fn", "def __init__(self, language=None):\n self._language = language", "def __init__(self, parent=None):\n QsciLexerJava.__init__(self, parent)\n Lexer.__init__(self)\n \n self.commentString = \"//\"\n self.streamCommentString = {\n 'start': '/* ',\n 'end': ' */'\n }\n 
self.boxCommentString = {\n 'start': '/* ',\n 'middle': ' * ',\n 'end': ' */'\n }\n \n self.keywordSetDescriptions = [\n self.tr(\"Primary keywords and identifiers\"),\n self.tr(\"Secondary keywords and identifiers\"),\n self.tr(\"Documentation comment keywords\"),\n self.tr(\"Global classes and typedefs\"),\n self.tr(\"Preprocessor definitions\"),\n self.tr(\"Task marker and error marker keywords\"),\n ]", "def analyze_syntax(self, languageModel):\n analysis = languageModel.annotate_text(include_entities=False, include_sentiment=False)\n return analysis.tokens", "async def runl(self, ctx: commands.Context, lang: str, *, code: str):\n result = await self._run_code(lang=lang, code=code)\n await self._send_result(ctx, result)", "def test_parser_infers_language(self):\n filename = \"main.c\"\n language = self.parser.infer_language(filename)\n assert_equal(\"C\", language)" ]
[ "0.58799744", "0.5864884", "0.58409506", "0.56989753", "0.5639393", "0.5590425", "0.546683", "0.5445196", "0.5430377", "0.53696084", "0.5355774", "0.53089696", "0.5282451", "0.5280632", "0.52497286", "0.5227728", "0.5212123", "0.5178399", "0.5176204", "0.5166289", "0.51205105", "0.5087208", "0.5074131", "0.50563663", "0.5050403", "0.50495493", "0.5041645", "0.50282174", "0.502353", "0.5013993" ]
0.71450984
0
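
The document retrieved for the lexical-analyzer query above defers the real work to Pygments: get_lexer_by_name() looks up a lexer for the requested language and raises pygments.util.ClassNotFound when none exists, which the document converts into its own LexerError. A minimal, self-contained sketch of that lookup (assuming only that the pygments package is installed) is:

# Sketch of the Pygments lexer lookup the document above wraps.
import pygments.util
from pygments import lex
from pygments.lexers import get_lexer_by_name

def tokenize_code(code, language):
    try:
        lexer = get_lexer_by_name(language)
    except pygments.util.ClassNotFound:
        raise ValueError('No Pygments lexer found for "%s".' % language)
    # lex() yields (tokentype, value) pairs, e.g. keywords, names, whitespace.
    return list(lex(code, lexer))

# Example: tokenize_code("def f(): pass", "python")
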
Merge subsequent tokens of same tokentype. Also strip the final newline (added by pygments).
def merge(self, tokens): tokens = iter(tokens) (lasttype, lastval) = tokens.next() for ttype, value in tokens: if ttype is lasttype: lastval += value else: yield(lasttype, lastval) (lasttype, lastval) = (ttype, value) if lastval.endswith('\n'): lastval = lastval[:-1] if lastval: yield(lasttype, lastval)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rehydrate_text(self, next_token):\n prefix_text = \"\"\n main_text = next_token.token_text.replace(\n InlineHelper.backspace_character, \"\"\n ).replace(\"\\x08\", \"\")\n\n print(\n \">>rehydrate_text>>\" + main_text.replace(\"\\a\", \"\\\\a\").replace(\"\\n\", \"\\\\n\")\n )\n main_text = self.resolve_replacement_markers(main_text)\n print(\n \"<<rehydrate_text>>\" + main_text.replace(\"\\a\", \"\\\\a\").replace(\"\\n\", \"\\\\n\")\n )\n\n print(\n \"<<leading_whitespace>>\"\n + next_token.extracted_whitespace.replace(\"\\a\", \"\\\\a\")\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\x03\", \"\\\\x03\")\n )\n leading_whitespace = self.resolve_replacement_markers(\n next_token.extracted_whitespace\n )\n print(\n \"<<leading_whitespace>>\"\n + leading_whitespace.replace(\"\\a\", \"\\\\a\")\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\x03\", \"\\\\x03\")\n )\n if self.block_stack:\n if (\n self.block_stack[-1].token_name\n == MarkdownToken.token_indented_code_block\n ):\n main_text = self.reconstitute_indented_text(\n main_text,\n self.block_stack[-1].extracted_whitespace,\n self.block_stack[-1].indented_whitespace,\n leading_whitespace,\n )\n prefix_text = \"\"\n leading_whitespace = \"\"\n elif self.block_stack[-1].token_name == MarkdownToken.token_html_block:\n main_text += \"\\n\"\n elif self.block_stack[-1].token_name == MarkdownToken.token_paragraph:\n if \"\\n\" in main_text:\n split_token_text = main_text.split(\"\\n\")\n split_parent_whitespace_text = self.block_stack[\n -1\n ].extracted_whitespace.split(\"\\n\")\n print(\n \">>split_token_text>>\"\n + str(split_token_text)\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\t\", \"\\\\t\")\n )\n print(\n \">>split_parent_whitespace_text>>\"\n + str(split_parent_whitespace_text)\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\t\", \"\\\\t\")\n )\n\n # TODO never incrementing?\n parent_rehydrate_index = self.block_stack[-1].rehydrate_index\n\n rejoined_token_text = []\n for iterator in enumerate(split_token_text, start=0):\n print(\">>\" + str(iterator))\n if iterator[0] == 0:\n joined_text = iterator[1]\n else:\n joined_text = (\n split_parent_whitespace_text[\n parent_rehydrate_index + iterator[0]\n ]\n + iterator[1]\n )\n rejoined_token_text.append(joined_text)\n split_token_text = rejoined_token_text\n\n if next_token.end_whitespace:\n split_end_whitespace_text = next_token.end_whitespace.split(\n \"\\n\"\n )\n print(\n \">>split_end_whitespace_text>>\"\n + str(split_end_whitespace_text)\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\t\", \"\\\\t\")\n )\n assert len(split_token_text) == len(split_end_whitespace_text)\n\n joined_token_text = []\n for iterator in enumerate(split_token_text):\n print(\">>\" + str(iterator))\n joined_text = (\n iterator[1] + split_end_whitespace_text[iterator[0]]\n )\n joined_token_text.append(joined_text)\n split_token_text = joined_token_text\n main_text = \"\\n\".join(split_token_text)\n elif self.block_stack[-1].token_name == MarkdownToken.token_setext_heading:\n if \"\\n\" in main_text:\n split_token_text = main_text.split(\"\\n\")\n split_parent_whitespace_text = next_token.end_whitespace.split(\"\\n\")\n print(\n \">>split_token_text>>\"\n + str(split_token_text)\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\t\", \"\\\\t\")\n )\n print(\n \">>split_parent_whitespace_text>>\"\n + str(split_parent_whitespace_text)\n .replace(\"\\n\", \"\\\\n\")\n .replace(\"\\t\", \"\\\\t\")\n )\n\n # TODO never incrementing?\n parent_rehydrate_index = 0 # self.block_stack[-1].rehydrate_index\n\n 
rejoined_token_text = []\n for iterator in enumerate(split_token_text, start=0):\n print(\">>iterator=\" + str(iterator))\n split_setext_text = []\n ws_prefix_text = \"\"\n ws_suffix_text = \"\"\n if split_parent_whitespace_text[iterator[0]]:\n split_setext_text = split_parent_whitespace_text[\n iterator[0]\n ].split(\"\\x02\")\n print(\">>split_setext_text=\" + str(split_setext_text))\n if len(split_setext_text) == 1:\n if iterator[0] == 0:\n ws_suffix_text = split_setext_text[0]\n else:\n ws_prefix_text = split_setext_text[0]\n else:\n assert len(split_setext_text) == 2\n ws_prefix_text = split_setext_text[0]\n ws_suffix_text = split_setext_text[1]\n\n joined_text = ws_prefix_text + iterator[1] + ws_suffix_text\n rejoined_token_text.append(joined_text)\n\n print(\">>rejoined_token_text=\" + str(rejoined_token_text))\n main_text = \"\\n\".join(rejoined_token_text)\n return prefix_text + leading_whitespace + main_text", "def _merge_conllu_subtokens(self, lines: List[str], doc: Doc)-> Doc:\n # identify and process all subtoken spans to prepare attrs for merging\n subtok_spans = []\n for line in lines:\n parts = line.split(\"\\t\")\n id_, word, lemma, pos, tag, morph, head, dep, _1, misc = parts\n if \"-\" in id_:\n subtok_start, subtok_end = id_.split(\"-\")\n subtok_span = doc[int(subtok_start) - 1 : int(subtok_end)]\n subtok_spans.append(subtok_span)\n # create merged tag, morph, and lemma values\n tags = []\n morphs = {}\n lemmas = []\n for token in subtok_span:\n tags.append(token.tag_)\n lemmas.append(token.lemma_)\n if token._.merged_morph:\n for feature in token._.merged_morph.split(\"|\"):\n field, values = feature.split(\"=\", 1)\n if field not in morphs:\n morphs[field] = set()\n for value in values.split(\",\"):\n morphs[field].add(value)\n # create merged features for each morph field\n for field, values in morphs.items():\n morphs[field] = field + \"=\" + \",\".join(sorted(values))\n # set the same attrs on all subtok tokens so that whatever head the\n # retokenizer chooses, the final attrs are available on that token\n for token in subtok_span:\n token._.merged_orth = token.orth_\n token._.merged_lemma = \" \".join(lemmas)\n token.tag_ = \"_\".join(tags)\n token._.merged_morph = \"|\".join(sorted(morphs.values()))\n token._.merged_spaceafter = (\n True if subtok_span[-1].whitespace_ else False\n )\n\n with doc.retokenize() as retokenizer:\n for span in subtok_spans:\n retokenizer.merge(span)\n\n return doc", "def rstrip(self, *args, **kwargs):\n stripped = self.text.rstrip(*args, **kwargs)\n offset = self.text.find(stripped)\n return Token(stripped, self.position + offset, self.category)", "def rehydrate_blank_line(cls, next_token):\n return next_token.extracted_whitespace + \"\\n\"", "def finalize(self):\n if self.whitespace:\n return Token('', self.whitespace,\n typ=Token.WHITESPACE, position=self.position)\n return None", "def rehydrate_paragraph_end(self, next_token):\n assert next_token\n top_stack_token = self.block_stack[-1]\n del self.block_stack[-1]\n return top_stack_token.final_whitespace + \"\\n\"", "def __add__(self, other):\n if isinstance(other, Token):\n return Token(self.text + other.text, self.position, self.category)\n else:\n return Token(self.text + other, self.position, self.category)", "def _trim_end(self, tokens: list[Token]) -> Block:\n i = last_token = self.end - 1\n while tokens[i].name in NON_CODING_TOKENS | {'DEDENT', 'NEWLINE'}:\n # if we find an indented comment inside our block, keep it\n if (\n tokens[i].name in {'NL', 'NEWLINE'} and\n tokens[i 
+ 1].name == UNIMPORTANT_WS and\n len(tokens[i + 1].src) > self._initial_indent(tokens)\n ):\n break\n # otherwise we've found another line to remove\n elif tokens[i].name in {'NL', 'NEWLINE'}:\n last_token = i\n i -= 1\n return self._replace(end=last_token + 1)", "def rehydrate_paragraph(self, next_token):\n self.block_stack.append(next_token)\n next_token.rehydrate_index = 0\n extracted_whitespace = next_token.extracted_whitespace\n if \"\\n\" in extracted_whitespace:\n line_end_index = extracted_whitespace.index(\"\\n\")\n extracted_whitespace = extracted_whitespace[0:line_end_index]\n return extracted_whitespace", "def tokenize(change):\n\n for token in change.tokenize():\n yield token\n\n yield NEW_LINE\n yield NEW_LINE\n\n yield Token.Text, 'Change successfully merged (revision: '\n yield Token.Keyword, change.current_revision[:8]\n yield Token.Text, ')'", "def untokenize(self):\n return ''.join([t[self.TEXT_WS] for t in self.data]).strip()", "def untokenize(self):\n return ''.join([t[self.TEXT_WS] for t in self.data]).strip()", "def merged_tokens(self, i, tokenizer):\n out = []\n counts = []\n last_id = None\n tokens = tokenizer.convert_ids_to_tokens(self.token_ids[i].tolist())\n for token, current_id in zip(tokens[1:-1], self.conll_ids[i]):\n if last_id == current_id.item():\n if token.startswith(\"##\"):\n out[-1] += (token[2:])\n else:\n out[-1] += token\n counts[-1] += 1\n else:\n out.append(token)\n counts.append(1)\n last_id = current_id\n return out, counts", "def untokenize(doc):\r\n return ' '.join(doc)", "def __radd__(self, other):\n return Token(\n other + self.text, self.position - len(other), self.category)", "def add_accumulated(self) -> None:\n if len(self.line_parts):\n for word in self.next_line.line_parts[0].words:\n self.line_parts[-1].add_word(word)\n self.next_line.line_parts = self.next_line.line_parts[1:]\n\n self.line_parts.extend(self.next_line.line_parts)\n last_part = self.line_parts[-1]\n last_part.add_word(' ')\n self.next_line.line_parts = [\n PDFTextLinePart(last_part.style, self.fonts, last_part.ids)\n ]", "def tokeneater(self, toktype, toktext, (srow,scol), (erow,ecol), line):\n # If we encounter any errors, then just give up.\n if toktype == token.ERRORTOKEN:\n raise tokenize.TokenError, toktype\n\n # Did we skip anything whitespace? If so, add a pseudotoken\n # for it, with toktype=None. (Note -- this skipped string\n # might also contain continuation slashes; but I won't bother\n # to colorize them.)\n startpos = self.line_offsets[srow] + scol\n if startpos > self.pos:\n skipped = self.text[self.pos:startpos]\n self.cur_line.append( (None, skipped) )\n\n # Update our position.\n self.pos = startpos + len(toktext)\n\n # Update our current line.\n self.cur_line.append( (toktype, toktext) )\n\n # When we reach the end of a line, process it.\n if toktype == token.NEWLINE or toktype == token.ENDMARKER:\n self.handle_line(self.cur_line)\n self.cur_line = []", "def compact(self):\n if self._changed == False:\n return\n t = self._tokens\n\n if t in [[], '*']:\n return \n\n # Tokens which can be reordered and joined if juxtapoxed.\n # E.g. 
3I3D3I --> 3D6I\n freeToks = ['I','D']\n iFirstFree = None\n for i in range(0,len(t)):\n # ...and i != len(t)-1 makes sure that sorting (`else`-Block) takes\n # place if the token list ends with a free (=reorderable) token\n if t[i][1] in freeToks and i != len(t)-1:\n if iFirstFree == None: \n iFirstFree = i\n else:\n if iFirstFree != None:\n # Sort by key\n t[iFirstFree:i+1] = sorted(t[iFirstFree:i+1], key=itemgetter(1))\n iFirstFree = None\n\n out = [t[0]]\n for i in range(1,len(t)):\n if t[i][1] == out[-1][1]:\n out[-1] = (out[-1][0] + t[i][0], t[i][1])\n else:\n out.append(t[i])\n\n\n self._tokens = out\n self._changed = False", "def merge_lines(self, curr, new):\n\n if len(new) > 2 or new[0] != \"NOT:\":\n add = []\n for i in range(len(new)):\n if \":\" in new[i] and new[i+1] != \"{\":\n if new[i] in curr:\n ind = curr.index(new[i])+1\n curr[ind] = self.merge(curr[ind], new[i+1])\n else:\n add.append(new[i])\n add.append(new[i+1])\n if add:\n if curr[-1] == \"}\":\n curr = curr[:-1] + add + [curr[-1]]\n else:\n curr = curr + add\n else:\n curr = curr[:-1]\n return curr", "def rehydrate_thematic_break(cls, next_token):\n return next_token.extracted_whitespace + next_token.rest_of_line + \"\\n\"", "def str (self, max_len_first, max_len_following=0) :\r\n\r\n\t\t## This version indents continuation lines by 1 space\r\n\t\tif not max_len_following == 0 :\r\n\t\t\tmax_len_following -= 1\r\n\t\t\r\n\t\tresult = tokenizer.join_tokens(self.tokens, max_len_first, max_len_following)\r\n\t\tresult[1:] = [ (\" \" + r) for r in result[1:] ]\r\n\r\n\t\treturn [result]", "def __iadd__(self, other):\n if isinstance(other, Token):\n new = Token(self.text + other.text, self.position, self.category)\n else:\n new = Token(self.text + other, self.position, self.category)\n return new", "def concatenate_processed_text(self):\n\n\n\t\tconcatenated_text = \"\"\n\t\tfor line in self.processed_text:\n\t\t\tconcatenated_text += \" \".join(line) + \" \"\n\n\n\t\t# Remove the trailing space character from the concatenated string\n\t\t# of words.\n\t\tconcatenated_text = concatenated_text[:-1]\n\n\t\tself.concatenated_text = concatenated_text", "def do_merge(self, line):\n self.review.merge()", "def nlp_merge_subtokens(doc, label=\"subtok\"):\n\n merger = Matcher(doc.vocab)\n merger.add(\"SUBTOK\", None, [{\"DEP\": label, \"op\": \"+\"}])\n matches = nlp_merge_common_matches(merger(doc))\n spans = [doc[start: end + 1] for _, start, end in matches]\n\n with doc.retokenize() as retokenizer:\n for span in spans:\n retokenizer.merge(span)\n return doc", "def detokenize(sent):\n new_sent = []\n for i, tok in enumerate(sent):\n if tok.startswith(\"##\"):\n new_sent[len(new_sent) - 1] = new_sent[len(new_sent) - 1] + tok[2:]\n else:\n new_sent.append(tok)\n return new_sent", "def combine(self, token):\n if token==None:\n return None\n retval = ''\n for tok in token:\n if isinstance(tok, list):\n retval+=self.combine(tok)\n else:\n retval+=tok\n return retval", "def detokenize(tokens):\n pass", "def get_retokenized(tokenizer, text):\n return ' '.join(tokenizer.tokenize(text))", "def consume(self):\n if self.next():\n self.tokens.pop(0)" ]
[ "0.60590655", "0.59240514", "0.57452136", "0.5704347", "0.5698722", "0.5688145", "0.56433755", "0.56253797", "0.5558617", "0.55530834", "0.55441177", "0.55441177", "0.5538296", "0.5537822", "0.55091155", "0.54938656", "0.5481803", "0.5433574", "0.5423299", "0.54027677", "0.5400288", "0.5395437", "0.5377362", "0.53269607", "0.5319696", "0.5226407", "0.52230483", "0.5219821", "0.5216755", "0.5211338" ]
0.7113429
0
Parse self.code and yield "classified" tokens.
def __iter__(self): if self.lexer is None: yield ([], self.code) return tokens = pygments.lex(self.code, self.lexer) for tokentype, value in self.merge(tokens): if self.tokennames == 'long': # long CSS class args classes = str(tokentype).lower().split('.') else: # short CSS class args classes = [_get_ttype_class(tokentype)] classes = [cls for cls in classes if cls not in unstyled_tokens] yield (classes, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(source_code):\n tokens = tokenize(source_code)\n return read(tokens)", "def tokenize(code):\n lineno = 1\n line_start = 0\n\n for match in TOKEN_REGEX.finditer(code):\n kind = match.lastgroup\n value = match.group(kind)\n\n if kind == 'IGNORE':\n if '\\n' in value:\n lineno += value.count('\\n')\n # match.start() is where the start of the ignored value\n # is in the code, and value.rindex('\\n')+1 is where the\n # last newline is in the value so line_start will be end\n # of that last newline, or beginning of the next line\n line_start = match.start() + value.rindex('\\n') + 1\n\n elif kind == 'ERROR':\n raise ValueError(\"invalid syntax: \" + match.group())\n\n else:\n startcol = match.start() - line_start\n endcol = match.end() - line_start\n yield Token((lineno, startcol), (lineno, endcol), kind, value)", "def tokenize(self, code):\n\t\tidx = 0\n\t\twhile idx < len(code):\n\t\t\tchar = code[idx]\n\t\t\tif char in SYMBOLS:\n\t\t\t\tescaped = self.escape_token(char)\n\t\t\t\tself.tokens.append(Token(T_SYMBOL, escaped))\n\t\t\t\tidx += 1\n\t\t\t\tcontinue\n\t\t\telif char.isspace():\n\t\t\t\tidx += 1\n\t\t\t\tcontinue\n\t\t\telif char.isdigit():\n\t\t\t\tnum = ''\n\t\t\t\twhile char.isdigit():\n\t\t\t\t\tnum += char\n\t\t\t\t\tidx += 1\n\t\t\t\t\tchar = code[idx]\n\t\t\t\tself.tokens.append(Token(T_INTEGER_CONSTANT, num))\n\t\t\t\tcontinue\n\t\t\telif char == '\"':\n\t\t\t\ttxt = ''\n\t\t\t\tidx += 1\n\t\t\t\tchar = code[idx]\n\t\t\t\twhile char != '\"':\n\t\t\t\t\ttxt += char\n\t\t\t\t\tidx += 1\n\t\t\t\t\tchar = code[idx]\n\t\t\t\tself.tokens.append(Token(T_STRING_CONSTANT, txt))\n\t\t\t\tidx += 1 # skip closing quote\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tidr = ''\n\t\t\t\twhile not char.isspace() and char not in SYMBOLS:\n\t\t\t\t\tidr += char\n\t\t\t\t\tidx += 1\n\t\t\t\t\tchar = code[idx]\n\t\t\t\ttoken_type = T_KEYWORD if idr in KEYWORDS else T_IDENTIFIER\n\t\t\t\tself.tokens.append(Token(token_type, idr))\n\t\t\t\tcontinue\n\t\treturn self.tokens", "def compile_class(self):\r\n self.tokenizer.advance() # ignore 'class' keyword\r\n self.class_name = self.tokenizer.identifier()\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n while self.tokenizer.curtok < len(self.tokenizer.tokens) - 1:\r\n dec = self.tokenizer.key_word()\r\n if dec == \"field\" or dec == \"static\":\r\n self.compile_var_dec()\r\n else:\r\n self.compile_subroutine()\r\n self.tokenizer.advance()", "def compile_class(self) -> None:\n self._consume('class')\n if self.tokenizer.token_type() != TokenTypes.IDENTIFIER:\n raise CompilationEngineError(f\"{self._get_current_token()} is an invalid token at this point. 
Expected a \"\n f\"class name.\")\n\n self.class_name = self._get_current_token()\n self._consume(TokenTypes.IDENTIFIER)\n self._consume('{')\n\n while self._get_current_token() != '}':\n if self._get_current_token() in CompilationEngine.CLASS_VAR_DEC_TOKENS:\n self.compile_class_var_dec()\n elif self._get_current_token() in CompilationEngine.SUBROUTINE_TOKENS:\n self.compile_subroutine_dec()\n else:\n raise CompilationEngineError(f\"{self._get_current_token()} is an expected token at this point\")\n\n self._consume('}')", "def run_parser(self, code_text):\n stream = io.TextIOWrapper(io.BytesIO(code_text), encoding=\"utf8\")\n self.scanner = MyScanner(stream, self.language)\n self.scanner.libraries = []\n\n while 1:\n logging.info(\"in parser, starting while\")\n token = self.scanner.read()\n logging.info(\"in run parser, token {}\".format(token))\n logging.info(\"in run parser, scanner position {}\".format(self.scanner.position()))\n if token[0] == KEYWORD:\n self.keywords.append(token[1])\n elif token[0] == OPERATOR:\n self.operations.append(token[1])\n elif token[0] == LITERAL:\n self.literals.append(token[1])\n\n if token[0] is None:\n break\n elif token[0] == \"unrecognized\":\n pass\n # raise errors.UnrecognizedInput(self.scanner, '')\n elif token[0] == COMMENT or token[0] == STRING:\n parsed = (token[0], token[1], self.scanner.position())\n self.list_of_tuples.append(parsed)\n else:\n self.full_list[token[1]] = token[0]\n parsed = (token[0], token[1], self.scanner.position())\n self.list_of_tuples.append(parsed)\n return self.full_list, self.list_of_tuples", "def tokenize_for_bleu_eval(self, code):\n code = re.sub(r'([^A-Za-z0-9_])', r' \\1 ', code)\n code = re.sub(r'([a-z])([A-Z])', r'\\1 \\2', code)\n code = re.sub(r'\\s+', ' ', code)\n code = code.replace('\"', '`')\n code = code.replace('\\'', '`')\n tokens = [t for t in code.split(' ') if t]\n\n return tokens", "def parse_code_classes(self):\n # Step1 : Gather XML files list\n if not self._xml_files_list:\n self.parse_code_files(store_xml_files_list=True)\n\n # Step 2: Parse all corresponding XML files.\n classes, classes_per_file = parse_xml_files_list(ClassLevelParser, self._xml_files_list)\n return classes, classes_per_file", "def tokenize(code): # pylint: disable=too-many-branches\n tokens = []\n token = Token()\n for c in code: # pylint: disable=invalid-name\n is_whitespace = c in string.whitespace\n is_and = (c == '&')\n is_or = (c == '|')\n is_oparen = (c == '(')\n is_cparen = (c == ')')\n is_invert = (c == '!')\n is_ident = not (is_whitespace or is_and or is_or or is_oparen or is_cparen)\n is_operator = is_and or is_or or is_oparen or is_cparen\n\n end_token_before = is_cparen or is_whitespace\n end_token_after = is_operator\n\n if end_token_before:\n if token.type != TokenType.UNINITIALIZED:\n tokens.append(token)\n token = Token()\n\n if is_whitespace:\n continue\n\n if is_ident or is_invert:\n token.type = TokenType.IDENTIFIER\n\n if is_invert:\n token.negated = is_invert\n\n # pylint: disable=redefined-variable-type\n if is_operator:\n token.type = TokenType.OPERATOR\n\n if is_and:\n token.operator_type = OperatorType.AND\n if is_or:\n token.operator_type = OperatorType.OR\n if is_oparen:\n token.operator_type = OperatorType.OPEN_PAREN\n if is_cparen:\n token.operator_type = OperatorType.CLOSE_PAREN\n # pylint: enable=redefined-variable-type\n\n if not is_invert:\n token.value += c\n\n if end_token_after:\n tokens.append(token)\n token = Token()\n\n if token.type != TokenType.UNINITIALIZED:\n 
tokens.append(token)\n\n return tokens", "def compile(self):\n\n\t\twhile(self.tokenizer.has_more_tokens()):\n\n\t\t\tif self.tokenizer.get_token() == 'class':\n\t\t\t\tself.compile_class()\n\t\t\telif self.tokenizer.get_token() in ['field','static']:\n\t\t\t\tself.compile_class_var_dec()\n\t\t\telif self.tokenizer.get_token() in ['function', 'method', 'constructor']:\n\t\t\t\tself.compile_subroutine()\n\n\t\tself.outfile.write('<symbol> } </symbol>\\n' + '</class>')\n\t\tself.outfile.close()", "def visit_code(self, code):\n\n def build_tuple(tup):\n out = []\n for e in tup:\n if isinstance(e, tuple):\n out.append(build_tuple(e))\n else:\n out.append(('prim', type(e)))\n return ('tuple', tuple(out))\n\n folds = _FoldedOps()\n for block in code.order:\n stack = _Stack()\n for op in block:\n if isinstance(op, opcodes.LOAD_CONST):\n elt = code.consts[op.arg]\n if isinstance(elt, tuple):\n typ = build_tuple(elt)\n stack.push(_Constant(typ, elt, typ[1], op))\n else:\n stack.push(_Constant(('prim', type(elt)), elt, None, op))\n elif isinstance(op, opcodes.BUILD_LIST):\n stack.build(list, op)\n elif isinstance(op, opcodes.BUILD_SET):\n stack.build(set, op)\n elif isinstance(op, opcodes.FORMAT_VALUE):\n if op.arg & loadmarshal.FVS_MASK:\n stack.build_str(2, op)\n else:\n stack.build_str(1, op)\n elif isinstance(op, opcodes.BUILD_STRING):\n stack.build_str(op.arg, op)\n elif isinstance(op, opcodes.BUILD_MAP):\n map_ = stack.fold_map_args(op.arg, op)\n if map_:\n typ = ('map', (map_.key_types, map_.value_types))\n val = dict(zip(map_.keys, map_.values))\n stack.push(_Constant(typ, val, map_.elements, op))\n elif isinstance(op, opcodes.BUILD_CONST_KEY_MAP):\n keys = stack.pop()\n vals = stack.fold_args(op.arg, op)\n if vals:\n keys.op.folded = op\n _, t = keys.typ\n typ = ('map', (frozenset(t), vals.types))\n val = dict(zip(keys.value, vals.values))\n elements = dict(zip(keys.value, vals.elements))\n stack.push(_Constant(typ, val, elements, op))\n elif isinstance(op, opcodes.LIST_APPEND):\n elements = stack.fold_args(2, op)\n if elements:\n lst, element = elements.elements\n tag, et = lst.typ\n assert tag == 'list'\n typ = (tag, et | {element.typ})\n value = lst.value + [element.value]\n elements = lst.elements + (element,)\n stack.push(_Constant(typ, value, elements, op))\n elif isinstance(op, opcodes.LIST_EXTEND):\n elements = stack.fold_args(2, op)\n if elements:\n lst, other = elements.elements\n tag, et = lst.typ\n assert tag == 'list'\n other_tag, other_et = other.typ\n if other_tag == 'tuple':\n # Deconstruct the tuple built in opcodes.LOAD_CONST above\n other_elts = tuple(_Constant(('prim', e), v, None, other.op)\n for (_, e), v in zip(other_et, other.value))\n elif other_tag == 'prim':\n assert other_et == str\n other_et = {other.typ}\n other_elts = tuple(_Constant(('prim', str), v, None, other.op)\n for v in other.value)\n else:\n other_elts = other.elements\n typ = (tag, et | set(other_et))\n value = lst.value + list(other.value)\n elements = lst.elements + other_elts\n stack.push(_Constant(typ, value, elements, op))\n elif isinstance(op, opcodes.MAP_ADD):\n elements = stack.fold_args(3, op)\n if elements:\n map_, key, val = elements.elements\n tag, (kt, vt) = map_.typ\n assert tag == 'map'\n typ = (tag, (kt | {key.typ}, vt | {val.typ}))\n value = {**map_.value, **{key.value: val.value}}\n elements = {**map_.elements, **{key.value: val}}\n stack.push(_Constant(typ, value, elements, op))\n elif isinstance(op, opcodes.DICT_UPDATE):\n elements = stack.fold_args(2, op)\n if elements:\n map1, 
map2 = elements.elements\n tag1, (kt1, vt1) = map1.typ\n tag2, (kt2, vt2) = map2.typ\n assert tag1 == tag2 == 'map'\n typ = (tag1, (kt1 | kt2, vt1 | vt2))\n value = {**map1.value, **map2.value}\n elements = {**map1.elements, **map2.elements}\n stack.push(_Constant(typ, value, elements, op))\n else:\n # If we hit any other bytecode, we are no longer building a literal\n # constant. Insert a None as a sentinel to the next BUILD op to\n # not fold itself.\n stack.push(None)\n\n # Clear the stack to save any folded constants before exiting the block\n stack.clear()\n\n # Now rewrite the block to replace folded opcodes with a single\n # LOAD_FOLDED_CONSTANT opcode.\n out = []\n for op in block:\n if id(op) in stack.consts:\n t = stack.consts[id(op)]\n arg = t\n pretty_arg = t\n o = opcodes.LOAD_FOLDED_CONST(op.index, op.line, arg, pretty_arg)\n o.next = op.next\n o.target = op.target\n o.block_target = op.block_target\n o.code = op.code\n op.folded = o\n folds.add(op)\n out.append(o)\n elif op.folded:\n folds.add(op)\n else:\n out.append(op)\n block.code = out\n\n # Adjust 'next' and 'target' pointers to account for folding.\n for op in code.code_iter:\n if op.next:\n op.next = folds.resolve(op.next)\n if op.target:\n op.target = folds.resolve(op.target)\n return code", "def _wrap_code(self, inner):\r\n yield 0, \"<code>\"\r\n for tup in inner:\r\n yield tup\r\n yield 0, \"</code>\"", "def parse_code(tokens):\n tok = tokens.pop(0)\n if tok != ('COMMAND', '.code'):\n raise RuntimeError('Expected keyword .code')\n\n tok = tokens.pop(0)\n if tok[0] != 'ID':\n raise RuntimeError('Invalid code name')\n\n code_name = tok[1]\n\n tok = tokens.pop(0)\n if tok != ('COMMAND', '.genome'):\n raise RuntimeError('Expected keyword .genome')\n\n tok = tokens.pop(0)\n if tok[0] != 'ID':\n raise RuntimeError('Invalid genome name')\n\n genome_name = tok[1]\n if genome_name not in genome_dict:\n read_genome(genome_name)\n ret = defines.GenomeCode(code_name, genome_dict[genome_name])\n\n while tokens != []:\n tok = tokens.pop(0)\n\n if tok != ('COMMAND', '.gene'):\n raise RuntimeError('Expected keyword .gene')\n\n tok = tokens.pop(0)\n if tok[0] != 'ID':\n raise RuntimeError('Invalid gene name')\n\n gene = ret.genome.find_gene(tok[1])\n if gene == None:\n raise RuntimeError('Nonexistent gene \\'%s\\'' % tok[1])\n\n allele1 = tokens.pop(0)\n allele2 = tokens.pop(0)\n if allele1[0] != 'ID' or allele2[0] != 'ID':\n raise RuntimeError('Allele must be an ID')\n\n gene_code = defines.GeneCode(gene, allele1[1], allele2[1])\n ret.add_gene_code(gene_code)\n\n return ret", "def CompileClass(self):\n\n ## Go to first token\n self.Tokenizer.advance()\n\n ## Expecting class keyword\n self._eat('class')\n self._write_opener('class')\n self._write_entry('keyword','class')\n\n ## Now handle the identifier\n\n if not self.Tokenizer.currentTokenType == \"IDENTIFIER\":\n raise ValueError(\"ERROR_UNEXPECTED_TOKEN: \" + self.Tokenizer.currentTokenType + \" \" + self.Tokenizer.currentToken )\n else:\n self._write_entry(self.Tokenizer.currentTokenType.lower(), self.Tokenizer.currentToken)\n\n self.Tokenizer.advance()\n\n ## Now opening curly bracket\n self._eat('{')\n self._write_entry('symbol','{')\n\n #self.Tokenizer.advance()\n\n\n # Now expecting 0 or more classVarDec\n\n # self.Tokenizer.advance()\n #\n # if self.Tokenizer.currentTokenType == \"KEYWORD\" and self.Tokenizer.currentToken in [\"static\", \"field\"]:\n # self._write_closer('class')\n # self.outputFile.close()\n\n\n ## Finally the closing brace\n try:\n self._eat('}')\n 
self._write_entry('symbol', '}')\n self._write_closer('class')\n except:\n print(\"waah\")\n\n self.outputFile.close()", "def tokenize(source_code):\n delimiters = '();'\n for delimiter in delimiters:\n source_code = source_code.replace(delimiter, ' '+delimiter+' ')\n return source_code.split()", "def __next__(self):\n if self.gen is None:\n self.gen = self.token_generator()\n\n tok = next(self.gen)\n while tok.type in self.IGNORED_TOKENS:\n tok = next(self.gen)\n return tok", "def tokenize(source_code):\n return source_code.replace('(',' ( ').replace(')',' ) ').split()", "def parse_code(self, line):\n code = []\n res = []\n j = 0\n i_start = 0\n self.i = 0\n self.line = line\n\n while not self._is_end():\n if self._is_syntax_start(self._peek_char()):\n token = self._read_while(lambda ch: ch == '{')\n if len(token) < 2:\n continue\n i_start = self.i - 2\n\n # read variable identifier\n if not self._is_id_start(self._peek_char()):\n continue\n val = self._read_while(self._is_id)\n if len(val) == 0:\n continue\n\n # read definition, if any\n df = None\n self._read_while(self._is_whitespace)\n if not self._is_end() and self._peek_char() == '=':\n self._next_char()\n # problem: the variable value can't contain \"}\"\n df = self._read_while(lambda ch: ch != '}')\n self._read_while(self._is_whitespace)\n\n token = self._read_while(lambda ch: ch == '}', max_len=2)\n if len(token) < 2:\n continue\n\n # read succeeds\n code.append(line[j:i_start])\n res.append(val)\n j = self.i\n\n # parse and save definition\n if df:\n try:\n df = json.loads('[{}]'.format(df))\n decision = Decision(val, df, '')\n if val in self.decisions:\n msg = 'Duplicate variable definition \"{}\"'\n raise ParseError(msg.format(val))\n self.decisions[val] = decision\n except ValueError:\n msg = 'Cannot parse variable definition:\\n{}'\n raise ParseError(msg.format(df))\n else:\n self._next_char()\n\n code.append(line[j:])\n return res, code", "async def process(self, tokens):\n return await self.parser.process(tokens)", "def tokens(self, ignore=True) -> _Iterator[Token]:\n self.reset()\n header = True\n while not self.EOF:\n token = self._get_token()\n if token is not None:\n if ignore:\n if header and not token.check_token(_token_names.KeywordsType(\"class\")):\n continue\n else:\n header = False\n if token.check_token(_token_names.Ignored.names()):\n continue\n yield token", "def raw_tokenize(src: str) -> Iterable[RawToken]:\n # Raw token handling; there is a later semantic mapping stage which\n # annotates atoms for the special handling of keywords and numbers.\n # We treat tokenization as an explicit state machine.\n # State transitions emit the previous block along with the previous state.\n state, start = None, 0\n\n for index, character in enumerate(src):\n next_state = None\n major_category = unicodedata.category(character) + character\n\n for (from_state, category_match), to_state in STATE_MACHINE.items():\n if (\n from_state == state and\n major_category.startswith(category_match)\n ):\n next_state = to_state\n break\n\n if next_state is None:\n raise ParseError(\n \"Unexpected '{0!r}'\".format(character),\n (index, index + 1),\n )\n\n if next_state != state:\n if start != index:\n assert state is not None\n\n yield RawToken(\n kind=state,\n value=src[start:index],\n location=(start, index),\n )\n start = index\n state = next_state\n\n if start != len(src):\n assert state is not None\n\n yield RawToken(\n kind=state,\n value=src[start:],\n location=(start, index + 1),\n )", "def something(token_class):\n def 
callback(lexer, match, context):\n text = match.group()\n if not text:\n return\n yield match.start(), token_class, text\n context.pos = match.end()\n return callback", "def ccode(self):\n from devito.ir.iet.visitors import CGen\n return CGen().visit(self)", "def tokenize(src):\n\n pass", "def _parse(self):\n extended_flow = ['else', 'elif', 'except', 'finally']\n statement_toks = ['{', '[', '(', '`']\n\n self._decorators = []\n self.freshscope = True\n self.iterator = iter(self)\n # This iterator stuff is not intentional. It grew historically.\n for token_type, tok in self.iterator:\n self.module.temp_used_names = []\n # debug.dbg('main: tok=[%s] type=[%s] indent=[%s]'\\\n # % (tok, tokenize.tok_name[token_type], start_position[0]))\n\n while token_type == tokenize.DEDENT and self._scope != self.module:\n token_type, tok = self.next()\n if self.start_pos[1] <= self._scope.start_pos[1]:\n self._scope.end_pos = self.start_pos\n self._scope = self._scope.parent\n if isinstance(self._scope, pr.Module) \\\n and not isinstance(self._scope, pr.SubModule):\n self._scope = self.module\n\n # check again for unindented stuff. this is true for syntax\n # errors. only check for names, because thats relevant here. If\n # some docstrings are not indented, I don't care.\n while self.start_pos[1] <= self._scope.start_pos[1] \\\n and (token_type == tokenize.NAME or tok in ['(', '['])\\\n and self._scope != self.module:\n self._scope.end_pos = self.start_pos\n self._scope = self._scope.parent\n if isinstance(self._scope, pr.Module) \\\n and not isinstance(self._scope, pr.SubModule):\n self._scope = self.module\n\n use_as_parent_scope = self.top_module if isinstance(self._scope,\n pr.SubModule) else self._scope\n first_pos = self.start_pos\n if tok == 'def':\n func = self._parse_function()\n if func is None:\n debug.warning(\"function: syntax error@%s\" %\n self.start_pos[0])\n continue\n self.freshscope = True\n self._scope = self._scope.add_scope(func, self._decorators)\n self._decorators = []\n elif tok == 'class':\n cls = self._parse_class()\n if cls is None:\n debug.warning(\"class: syntax error@%s\" % self.start_pos[0])\n continue\n self.freshscope = True\n self._scope = self._scope.add_scope(cls, self._decorators)\n self._decorators = []\n # import stuff\n elif tok == 'import':\n imports = self._parse_import_list()\n for count, (m, alias, defunct) in enumerate(imports):\n e = (alias or m or self).end_pos\n end_pos = self.end_pos if count + 1 == len(imports) else e\n i = pr.Import(self.module, first_pos, end_pos, m,\n alias, defunct=defunct)\n self._check_user_stmt(i)\n self._scope.add_import(i)\n if not imports:\n i = pr.Import(self.module, first_pos, self.end_pos, None,\n defunct=True)\n self._check_user_stmt(i)\n self.freshscope = False\n elif tok == 'from':\n defunct = False\n # take care for relative imports\n relative_count = 0\n while True:\n token_type, tok = self.next()\n if tok != '.':\n break\n relative_count += 1\n # the from import\n mod, token_type, tok = self._parse_dot_name(self._current)\n if str(mod) == 'import' and relative_count:\n self._gen.push_last_back()\n tok = 'import'\n mod = None\n if not mod and not relative_count or tok != \"import\":\n debug.warning(\"from: syntax error@%s\" % self.start_pos[0])\n defunct = True\n if tok != 'import':\n self._gen.push_last_back()\n names = self._parse_import_list()\n for count, (name, alias, defunct2) in enumerate(names):\n star = name is not None and name.names[0] == '*'\n if star:\n name = None\n e = (alias or name or self).end_pos\n 
end_pos = self.end_pos if count + 1 == len(names) else e\n i = pr.Import(self.module, first_pos, end_pos, name,\n alias, mod, star, relative_count,\n defunct=defunct or defunct2)\n self._check_user_stmt(i)\n self._scope.add_import(i)\n self.freshscope = False\n # loops\n elif tok == 'for':\n set_stmt, tok = self._parse_statement(added_breaks=['in'])\n if tok == 'in':\n statement, tok = self._parse_statement()\n if tok == ':':\n s = [] if statement is None else [statement]\n f = pr.ForFlow(self.module, s, first_pos, set_stmt)\n self._scope = self._scope.add_statement(f)\n else:\n debug.warning('syntax err, for flow started @%s',\n self.start_pos[0])\n if statement is not None:\n statement.parent = use_as_parent_scope\n if set_stmt is not None:\n set_stmt.parent = use_as_parent_scope\n else:\n debug.warning('syntax err, for flow incomplete @%s',\n self.start_pos[0])\n if set_stmt is not None:\n set_stmt.parent = use_as_parent_scope\n\n elif tok in ['if', 'while', 'try', 'with'] + extended_flow:\n added_breaks = []\n command = tok\n if command in ['except', 'with']:\n added_breaks.append(',')\n # multiple inputs because of with\n inputs = []\n first = True\n while first or command == 'with' \\\n and tok not in [':', '\\n']:\n statement, tok = \\\n self._parse_statement(added_breaks=added_breaks)\n if command == 'except' and tok in added_breaks:\n # the except statement defines a var\n # this is only true for python 2\n n, token_type, tok = self._parse_dot_name()\n if n:\n n.parent = statement\n statement.set_vars.append(n)\n if statement:\n inputs.append(statement)\n first = False\n\n if tok == ':':\n f = pr.Flow(self.module, command, inputs, first_pos)\n if command in extended_flow:\n # the last statement has to be another part of\n # the flow statement, because a dedent releases the\n # main scope, so just take the last statement.\n try:\n s = self._scope.statements[-1].set_next(f)\n except (AttributeError, IndexError):\n # If set_next doesn't exist, just add it.\n s = self._scope.add_statement(f)\n else:\n s = self._scope.add_statement(f)\n self._scope = s\n else:\n for i in inputs:\n i.parent = use_as_parent_scope\n debug.warning('syntax err, flow started @%s',\n self.start_pos[0])\n # returns\n elif tok in ['return', 'yield']:\n s = self.start_pos\n self.freshscope = False\n # add returns to the scope\n func = self._scope.get_parent_until(pr.Function)\n if tok == 'yield':\n func.is_generator = True\n\n stmt, tok = self._parse_statement()\n if stmt is not None:\n stmt.parent = use_as_parent_scope\n try:\n func.returns.append(stmt)\n # start_pos is the one of the return statement\n stmt.start_pos = s\n except AttributeError:\n debug.warning('return in non-function')\n # globals\n elif tok == 'global':\n stmt, tok = self._parse_statement(self._current)\n if stmt:\n self._scope.add_statement(stmt)\n for name in stmt.used_vars:\n # add the global to the top, because there it is\n # important.\n self.module.add_global(name)\n # decorator\n elif tok == '@':\n stmt, tok = self._parse_statement()\n if stmt is not None:\n self._decorators.append(stmt)\n elif tok == 'pass':\n continue\n elif tok == 'assert':\n stmt, tok = self._parse_statement()\n if stmt is not None:\n stmt.parent = use_as_parent_scope\n self._scope.asserts.append(stmt)\n # default\n elif token_type in [tokenize.NAME, tokenize.STRING,\n tokenize.NUMBER] \\\n or tok in statement_toks:\n # this is the main part - a name can be a function or a\n # normal var, which can follow anything. 
but this is done\n # by the statement parser.\n stmt, tok = self._parse_statement(self._current)\n if stmt:\n self._scope.add_statement(stmt)\n self.freshscope = False\n else:\n if token_type not in [tokenize.COMMENT, tokenize.INDENT,\n tokenize.NEWLINE, tokenize.NL]:\n debug.warning('token not classified', tok, token_type,\n self.start_pos[0])\n continue\n self.no_docstr = False", "def create_token_generator(self):\n while self.current_char is not None:\n # handle whitespaces\n if self.current_char.isspace():\n self.skip_whitespace()\n continue\n\n # handle integer and float numbers\n if self.current_char.isdigit():\n yield self.number()\n continue\n\n # handle identifiers e.g variable names\n if self.current_char.isalpha():\n yield self.identifier()\n continue\n\n # handle strings e.g \"Hello, World\"\n if self.current_char == '\"':\n self.advance() # skip opening quote\n yield self.string()\n self.advance() # skip closing quote\n continue\n\n # handle single characters e.g symbols\n if self.current_char in self.tokentype.keys():\n char = self.current_char\n self.advance()\n yield self.generic_token(char)\n continue\n # add token to indicate end of file (EOF)\n yield Token(self.tokentype['EOF'], None)", "def tokens():\n pass", "def fix_code():\n itoks = tokenize.generate_tokens(StringIO.StringIO(code).readline)\n def run():\n for toktype, toktext, (srow, scol), (erow, ecol), line in itoks:\n if toktext == 'StateDataReporter':\n toktext = '__queue_reporter_factory(__queue)'\n yield (toktype, toktext, (srow, scol), (erow, ecol), line)\n\n return tokenize.untokenize(run()) + '__queue.put(None)'", "def transform(code: StringIO, transformer: TBangTransformer) -> Iterator[str]:\n tokens = tokenize.generate_tokens(code.readline)\n\n bangexpr = [] # type: List[str]\n bangcont = False\n prebang = None\n ptkn = None\n indent = 0\n bang_indent = -100\n last_bang_line = -100\n for ctkn in tokens:\n\n if ctkn.type == tokenize.INDENT:\n indent += 1\n if last_bang_line + 1 == ctkn.start[0]:\n bang_indent = indent\n elif ctkn.type == tokenize.DEDENT:\n indent -= 1\n if bang_indent > indent:\n bang_indent = -100\n\n # due to continuations we can't rely on NEWLINE tokens, instead we have\n # use the lexical information to detect when we're on a new line\n #TODO: Support indent/dedent for multiline\n if ptkn and ctkn.start[0] > ptkn.start[0]:\n if bangcont or bang_indent == indent:\n if ctkn.type is tokenize.ENDMARKER:\n raise SyntaxError('BangExpr continuation at program end')\n\n line = ctkn.line.rstrip('\\r\\n')\n bangexpr.append(line)\n bangcont = line.endswith('\\\\')\n last_bang_line = ctkn.start[0]\n elif bangexpr:\n lines = list(transformer(bangexpr))\n assert len(lines) <= len(bangexpr)\n if lines and prebang:\n lines[0] = prebang + lines[0]\n\n yield from lines\n bangexpr = []\n last_bang_line = ptkn.start[0]\n else:\n yield ptkn.line\n\n ptkn = ctkn\n\n if bangexpr:\n continue\n\n if ctkn.string == '!':\n col = ctkn.start[1]\n prebang = ctkn.line[0:col]\n line = ctkn.line[col+1:].lstrip(' \\t').rstrip('\\r\\n')\n bangexpr.append(line.rstrip('\\\\'))\n bangcont = line.endswith('\\\\')\n last_bang_line = ctkn.start[0]\n\n assert not bangexpr, bangexpr", "def tokenize(self):\n\n while not self.is_end_input(): \n #detect the beginning of each token \n #then call other methods if the token is of variable size\n if self.current_char.isspace():\n self.lex_whitespace()\n continue #do not need to return a token here\n if self.current_char.isdigit():\n return Token(INTEGER,self.lex_integer())\n if 
self.current_char == '+':\n self.advance()\n return Token(PLUS,'+')\n if self.current_char == '-':\n self.advance()\n return Token(MINUS,'-')\n if self.current_char == '*':\n self.advance()\n return Token(MUL, '*')\n\n if self.current_char == '/':\n self.advance()\n return Token(DIV, '/')\n\n if self.current_char == '(':\n self.advance()\n return Token(LPAREN, '(')\n\n if self.current_char == ')':\n self.advance()\n return Token(RPAREN, ')')\n \n self.error()\n \n return Token(EOF, None)" ]
[ "0.6318408", "0.63139665", "0.6242765", "0.6229688", "0.607422", "0.6070667", "0.603829", "0.5967526", "0.5900992", "0.5900432", "0.5861038", "0.5828368", "0.5738229", "0.57144123", "0.5686451", "0.56318927", "0.56315476", "0.5565116", "0.549738", "0.54870355", "0.54863477", "0.5465418", "0.5454488", "0.54251003", "0.54150665", "0.5410599", "0.5389503", "0.53388375", "0.5336293", "0.5322054" ]
0.72035605
0
Execute qemu-img inside a container that mounts input_path and output_path into itself
def convert(input_format, output_format, input_path, output_path): # mount the input file to /work/<filename> inside the container path_in = Path(input_path) input_abspath = path_in.absolute().__str__() assert_path_exists(input_abspath) internal_input_path = f"/input/{path_in.name}" in_mount = f"-v {input_abspath}:{internal_input_path}" # The mount for the output dir varies depending on if its a file or block device path_out = Path(output_path) if path_out.is_block_device(): # directly map the block device to the container assert_path_exists(path_out) out_mount = f"--device {path_out}" internal_output_path = path_out else: # output is a file (or about to be), so mount the folder it exists in output_abspath = path_out.absolute().__str__() output_dir = Path(output_abspath).parent.__str__() assert_path_exists(output_dir) internal_output_dir = "/output" internal_output_path = f"{internal_output_dir}/{path_out.name}" out_mount = f"-v {output_dir}:{internal_output_dir}" name = "qemu-img" image = "breqwatr/qemu-img:latest" run = ( f"qemu-img convert -f {input_format} -O {output_format} " f"{internal_input_path} {internal_output_path}" ) cmd = f"docker run -it --name {name} --rm {in_mount} {out_mount} {image} {run}" shell(cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli(context):\n dev = f\"docker run -it -v {PWD}:/local {IMAGE_NAME}:{IMAGE_VER} /bin/bash\"\n print(f\"{dev}\")\n context.run(f\"{dev}\", pty=True)", "def convert_image(source, dest, out_format, run_as_root=False):\n cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)\n utils.execute(*cmd, run_as_root=run_as_root)", "def start_qemu_sdimg(latest_sdimg):\n fh, img_path = tempfile.mkstemp(suffix=\".sdimg\", prefix=\"test-image\")\n # don't need an open fd to temp file\n os.close(fh)\n\n # Make a disposable image.\n shutil.copy(latest_sdimg, img_path)\n\n # pass QEMU drive directly\n qenv = {}\n qenv[\"VEXPRESS_IMG\"] = img_path\n qenv[\"MACHNE\"] = \"vexpress-qemu\"\n\n try:\n qemu = start_qemu(qenv)\n except:\n os.remove(img_path)\n raise\n\n return qemu, img_path", "def show(vol_path):\n name = \"qemu-img\"\n image = \"breqwatr/qemu-img:latest\"\n path = Path(vol_path)\n vol_abspath = path.absolute().__str__()\n run = f\"qemu-img info {vol_abspath}\"\n mount = f\"-v {vol_abspath}:{vol_abspath}\"\n cmd = f\"docker run --rm -it --name {name} {mount} {image} {run}\"\n shell(cmd)", "def dockerfile_with_path_map() -> co.Exec:\n path_map = {\"./code\": \"/root/code\"}\n image = co.Image(\n dockerfile=\"./docker/Dockerfile.copy\", context=\".\", path_map=path_map\n )\n return co.Exec(\"python /root/code/test.py\", image=image, doc=co.util.magic_doc())", "def input(host_directory_path):\n return create_volume_string(host_directory_path, BIOBOX_INPUT_MOUNT_LOC, True)", "def do_run(cs, args):\n opts = {}\n opts['name'] = args.name\n opts['image'] = args.image\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['environment'] = zun_utils.format_args(args.environment)\n opts['workdir'] = args.workdir\n opts['auto_remove'] = args.auto_remove\n opts['labels'] = zun_utils.format_args(args.label)\n opts['image_pull_policy'] = args.image_pull_policy\n opts['image_driver'] = args.image_driver\n opts['hints'] = zun_utils.format_args(args.hint)\n opts['nets'] = zun_utils.parse_nets(args.net)\n opts['mounts'] = zun_utils.parse_mounts(args.mount)\n opts['runtime'] = args.runtime\n opts['hostname'] = args.hostname\n opts['disk'] = args.disk\n opts['availability_zone'] = args.availability_zone\n opts['command'] = args.command\n opts['registry'] = args.registry\n opts['host'] = args.host\n if args.entrypoint:\n opts['entrypoint'] = zun_utils.parse_entrypoint(args.entrypoint)\n if args.healthcheck:\n opts['healthcheck'] = zun_utils.parse_health(args.healthcheck)\n\n if args.auto_heal:\n opts['auto_heal'] = args.auto_heal\n if args.security_group:\n opts['security_groups'] = args.security_group\n if args.expose_port:\n opts['exposed_ports'] = zun_utils.parse_exposed_ports(args.expose_port)\n if args.restart:\n opts['restart_policy'] = zun_utils.check_restart_policy(args.restart)\n if args.interactive:\n opts['interactive'] = True\n if args.privileged:\n opts['privileged'] = True\n opts = zun_utils.remove_null_parms(**opts)\n container = cs.containers.run(**opts)\n _show_container(container)\n container_uuid = getattr(container, 'uuid', None)\n if args.interactive:\n ready_for_attach = False\n while True:\n container = cs.containers.get(container_uuid)\n if zun_utils.check_container_status(container, 'Running'):\n ready_for_attach = True\n break\n if zun_utils.check_container_status(container, 'Error'):\n raise exceptions.ContainerStateError(container_uuid)\n print(\"Waiting for container start\")\n time.sleep(1)\n if ready_for_attach is True:\n response = 
cs.containers.attach(container_uuid)\n websocketclient.do_attach(cs, response, container_uuid, \"~\", 0.5)\n else:\n raise exceptions.InvalidWebSocketLink(container_uuid)", "def cat_cmd(server, client, line):\n if len(line.split(' ')) > 1 and line.split(' ')[1] == \"/proc/mounts\":\n path = os.path.dirname(os.path.realpath(__file__))\n path = path[:-7] # shaves off /engine\n with open(\"{}/fakefiles/proc%mounts\".format(path), \"r\") as f:\n response = f.read()\n client.exit_status = 0\n else:\n response = client.run_in_container(line)\n client.send(response)", "def run_in_docker(docker_image: str,\n source_code_path: str,\n *,\n environment_vars: List[str] = None,\n command: str = None,\n result_path: str =\"/tmp/results\") -> str:\n\n #\n # Create temporal directory in the local host.\n #\n # We use dir=\"/tmp\" because in OSX, TemporaryDirectory function create the\n # temporal dir in path \"/var/...\" and, by default, Docker hasn't permission\n # to mount this directory, raising a error like that:\n #\n # The path /var/folders/_h/b5wqbhtn4zvcmsshf8nv1kcr0000gn/T/deeptracykyoc\n # is not shared from OS X and is not known to Docker.\n # You can configure shared paths from Docker ->\n # Preferences... -> File Sharing.\n # See https://docs.docker.com/docker-for-mac/osxfs/#namespaces for\n # more info.\n # -------------------------------------------------------------------------\n with tempfile.TemporaryDirectory(prefix=\"deeptracy\",\n dir=\"/tmp\") as tmp_dir:\n\n docker_client = docker.from_env()\n\n result_file_name = \"\".join(str(random.randint(0, 9))\n for _ in range(30))\n host_result_file = os.path.join(tmp_dir, result_file_name)\n\n # Choice volumes\n docker_volumes = {\n tmp_dir: {\n 'bind': result_path,\n 'mode': 'rw'\n },\n source_code_path: {\n 'bind': \"/opt/app\",\n 'mode': 'ro'\n },\n }\n\n # --------------------------------------------------------------------------\n # Build function call options\n # --------------------------------------------------------------------------\n envs = environment_vars or []\n envs.extend([\n # \"OUTPUT_FILE={}\".format(container_result_file)\n \"OUTPUT_FILE={}\".format(result_file_name)\n ])\n\n # Run content inside docker\n docker_client.containers.run(\n image=docker_image,\n remove=True,\n command=command,\n environment=envs,\n volumes=docker_volumes\n )\n\n # Read result file\n with open(host_result_file, \"r\") as f:\n yield f.read()", "def raw(self, out, config, size, **filters):\r\n with vm2vm.raw.RawImage(out, \"w\") as img:\r\n img.mkfs(size)\r\n with vm2vm.raw.Mountpoint(img.name) as mnt:\r\n self.copy(dest=mnt, config=config, **filters)", "def docker_exec(cmdline):\n local('docker exec -ti {} {}'.format(project_name, cmdline))", "def qemu_img_create(config, size_mb):\n\n opts = [\n \"key-secret=sec0\",\n \"iter-time=10\",\n \"cipher-alg=%s-%d\" % (config.cipher, config.keylen),\n \"cipher-mode=%s\" % config.mode,\n \"ivgen-alg=%s\" % config.ivgen,\n \"hash-alg=%s\" % config.hash,\n ]\n if config.ivgen_hash is not None:\n opts.append(\"ivgen-hash-alg=%s\" % config.ivgen_hash)\n\n args = [\"create\", \"-f\", \"luks\",\n \"--object\",\n (\"secret,id=sec0,data=%s,format=base64\" %\n config.first_password_base64()),\n \"-o\", \",\".join(opts),\n config.image_path(),\n \"%dM\" % size_mb]\n\n iotests.log(\"qemu-img \" + \" \".join(args), filters=[iotests.filter_test_dir])\n iotests.log(iotests.qemu_img_pipe(*args), filters=[iotests.filter_test_dir])", "def _exec_command_in_container(client, container, command):\n exec_id = 
client.exec_create(container, command)\n output = client.exec_start(exec_id).decode('utf-8')\n logger.info(output)\n return output", "def _run(\n name,\n cmd,\n output=None,\n no_start=False,\n preserve_state=True,\n stdin=None,\n python_shell=True,\n output_loglevel=\"debug\",\n use_vt=False,\n path=None,\n ignore_retcode=False,\n chroot_fallback=None,\n keep_env=\"http_proxy,https_proxy,no_proxy\",\n):\n orig_state = state(name, path=path)\n try:\n if attachable(name, path=path):\n ret = __salt__[\"container_resource.run\"](\n name,\n cmd,\n path=path,\n container_type=__virtualname__,\n exec_driver=EXEC_DRIVER,\n output=output,\n no_start=no_start,\n stdin=stdin,\n python_shell=python_shell,\n output_loglevel=output_loglevel,\n ignore_retcode=ignore_retcode,\n use_vt=use_vt,\n keep_env=keep_env,\n )\n else:\n if not chroot_fallback:\n raise CommandExecutionError(f\"{name} is not attachable.\")\n rootfs = info(name, path=path).get(\"rootfs\")\n # Set context var to make cmd.run_chroot run cmd.run instead of\n # cmd.run_all.\n __context__[\"cmd.run_chroot.func\"] = __salt__[\"cmd.run\"]\n ret = __salt__[\"cmd.run_chroot\"](\n rootfs,\n cmd,\n stdin=stdin,\n python_shell=python_shell,\n output_loglevel=output_loglevel,\n ignore_retcode=ignore_retcode,\n )\n finally:\n # Make sure we honor preserve_state, even if there was an exception\n new_state = state(name, path=path)\n if preserve_state:\n if orig_state == \"stopped\" and new_state != \"stopped\":\n stop(name, path=path)\n elif orig_state == \"frozen\" and new_state != \"frozen\":\n freeze(name, start=True, path=path)\n\n if output in (None, \"all\"):\n return ret\n else:\n return ret[output]", "def test_baseimage_first(self):\n s = Stage()\n s += shell(commands=['abc'])\n s.name = 'bar'\n s.baseimage('foo')\n self.assertEqual(str(s), 'FROM foo AS bar\\n\\nRUN abc')", "def run_in_docker(image, commands): # pragma: no cover\n repo_dir = os.path.abspath(os.path.join(__file__, '../../'))\n mount_option = '{}:/mnt:ro'.format(repo_dir)\n\n cmd = ('docker', 'run', '-v', mount_option, '-i', image, 'sh')\n proc = subprocess.Popen(\n cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n )\n\n lines = '\\n'.join(commands)\n return proc.communicate(lines.encode('utf-8'))[0].decode('utf-8')", "def snapshot(source, destination):\n\n processutils.execute(\n 'qemu-img convert --force-share -O qcow2 %s %s'\n % (source, destination),\n shell=True)", "def main():\n logging.basicConfig(stream=sys.stderr, level=logging.DEBUG,\n format='%(name)s (%(levelname)s): %(message)s')\n\n kvm = pathlib.Path(\"/disks/d/VMWare/KVM\")\n os = { \n 'win7':'/disks/d/OS/Windows/Windows_7/Windows_7_LITE_X64.iso',\n 'win7_full':'/disks/d/OS/Windows/Windows_7/fr_windows_7_ultimate_x64_dvd_x15-65928.iso',\n 'mint':'/disks/d/OS/Unix/Mint/linuxmint-18.3-cinnamon-64bit.iso',\n 'solaris':'/disks/d/OS/Unix/Solaris/11/sol-11_3-text-x86.iso'\n }\n\n try:\n arguments = parse_command_line(sys.argv)\n # Assign args to variables\n server = arguments.name\n os = arguments.os\n legacy = arguments.legacy\n dry = arguments.dry_run\n if kvm.path.exists():\n kvm_disk = kvm_path + server\n command = \"virt-install --ram 2048 --disk path=${DIR_HOST}/${HOST}.qcow2,size=8 --vcpus 2 --os-type linux --os-variant ubuntuquantal --network bridge=virbr0\"\n if dry:\n print(command)\n print(kvm_disk)\n\n except KeyboardInterrupt:\n log.error('Program interrupted!')\n finally:\n logging.shutdown()", "def dockerize_test(ctx, binary, skip_cleanup=False):\n import docker\n\n client = docker.from_env()\n 
temp_folder = tempfile.mkdtemp(prefix=\"ddtest-\")\n\n ctx.run(\"cp %s %s/test.bin\" % (binary, temp_folder))\n\n with open(\"%s/Dockerfile\" % temp_folder, 'w') as stream:\n stream.write(\n \"\"\"FROM debian:stretch-slim\nENV DOCKER_DD_AGENT=yes\nWORKDIR /\nADD https://github.com/docker/compose/releases/download/1.16.1/docker-compose-Linux-x86_64 /bin/docker-compose\nRUN echo \"1804b0ce6596efe707b9cab05d74b161833ed503f0535a937dd5d17bea8fc50a /bin/docker-compose\" > sum && \\\n sha256sum -c sum && \\\n chmod +x /bin/docker-compose\nCMD /test.bin\nCOPY test.bin /test.bin\n\"\"\"\n )\n # Handle optional testdata folder\n if os.path.isdir(\"./testdata\"):\n ctx.run(\"cp -R testdata %s\" % temp_folder)\n stream.write(\"COPY testdata /testdata\")\n\n test_image, _ = client.images.build(path=temp_folder, rm=True)\n\n scratch_volume = client.volumes.create()\n\n test_container = client.containers.run(\n test_image.id,\n detach=True,\n pid_mode=\"host\", # For origin detection\n environment=[\"SCRATCH_VOLUME_NAME=\" + scratch_volume.name, \"SCRATCH_VOLUME_PATH=/tmp/scratch\",],\n volumes={\n '/var/run/docker.sock': {'bind': '/var/run/docker.sock', 'mode': 'ro'},\n '/proc': {'bind': '/host/proc', 'mode': 'ro'},\n '/sys/fs/cgroup': {'bind': '/host/sys/fs/cgroup', 'mode': 'ro'},\n scratch_volume.name: {'bind': '/tmp/scratch', 'mode': 'rw'},\n },\n )\n\n exit_code = test_container.wait()['StatusCode']\n\n print(test_container.logs(stdout=True, stderr=False, stream=False))\n\n sys.stderr.write(test_container.logs(stdout=False, stderr=True, stream=False).decode(sys.stderr.encoding))\n\n if not skip_cleanup:\n shutil.rmtree(temp_folder)\n test_container.remove(v=True, force=True)\n scratch_volume.remove(force=True)\n client.images.remove(test_image.id)\n\n if exit_code != 0:\n raise Exit(code=exit_code)", "def script(arch, version, variant, packages, mirror, disk_size, swap_size, image_format, root_password, hostname, no_confirm, output):\n\n # Checking validity of the command-line arguments.\n check_arguments(locals())\n\n # Checking if dependencies for this script are installed.\n check_dependencies(arch)\n\n # Make sure output value is correct and we won't run into any issue at the\n # final step (moving the result to its final location). We check the file\n # does not exist, and if its parent directory does exist.\n output = os.path.abspath(output)\n assert not os.path.exists(output), \"output value is incocrect; destination exists\"\n assert os.path.isdir(os.path.dirname(output)), \"output value is incorrect; parent folder does not exist\"\n\n # Compute partitions info (todo; compute actual infos)\n partitions = [\n ('esp', 512, 'fat32', 42, 42),\n ('root', 512, 'ext4', 42, 42),\n ('swap', 512, 'swap', 42, 42)\n ]\n\n # Compute list of packages that will be explicitly installed.\n packages = compute_packages(packages, locals())\n\n # Printing summary and asking for confirmation.\n summary_mesage = compute_summary_message(locals())\n\n if not no_confirm:\n summary_mesage += \\\n \"\\nPass the --no-confirm flag if you don't want to be prompted for confirmation.\\n\"\n\n print(summary_mesage)\n\n if not no_confirm:\n is_confirmed = input(\"Do you confirm those options ? 
[y/n] \")\n if not is_confirmed.lower().startswith('y'):\n print(\"Abort!\")\n exit(1)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n print(f\"Creating a raw disk image of size {disk_size}MiB\")\n disk_path = create_disk_image(tmp_dir, disk_size)\n\n print(\"Partitioning the disk...\")\n partition_disk(disk_path, disk_size, swap_size)\n\n loop_device = '/dev/loop42'\n with attach_to_loop_device(disk_path, loop_device):\n print(\"Formatting partitions...\")\n format_partitions(loop_device)\n\n mount_dir = os.path.join(tmp_dir, 'mnt')\n os.mkdir(mount_dir)\n\n with mount_root_partition(loop_device, mount_dir):\n create_chroot_environment(mount_dir, arch, version, variant, mirror, packages)\n\n # In order to chroot into a filesystem with an architecture\n # different than the host, we need to install a binary\n # interpreter.\n if arch == 'armhf':\n print(\"Copying qemu-arm-static to the chroot environment\")\n shutil.copy2('/usr/bin/qemu-arm-static', os.path.join(mount_dir, 'usr/bin/'))\n elif arch == 'arm64':\n print(\"Copying qemu-aarch64-static to the chroot environment\")\n shutil.copy2('/usr/bin/qemu-aarch64-static', os.path.join(mount_dir, 'usr/bin/'))\n\n with mount_boot_partition(loop_device, mount_dir):\n with mount_transient_files(mount_dir):\n # Configure the GRUB boot loader.\n if arch == 'armhf':\n grub_package = 'grub-efi-arm'\n grub_target = 'arm-efi'\n elif arch == 'arm64':\n grub_package = 'grub-efi-arm64'\n grub_target = 'arm64-efi'\n elif arch == 'amd64':\n grub_package = 'grub-efi-amd64'\n grub_target = 'x86_64-efi'\n\n update_system_cmd = ['apt-get', 'update']\n run_chroot_command(update_system_cmd, mount_dir, arch)\n\n install_grub_pkg_cmd = ['apt-get', 'install', '-y', '--install-recommends', grub_package]\n run_chroot_command(install_grub_pkg_cmd, mount_dir, arch)\n\n purge_osprober_cmd = ['apt-get', '--autoremove', '-y', 'purge', 'os-prober']\n run_chroot_command(purge_osprober_cmd, mount_dir, arch)\n\n # Adjust the '/etc/default/grub' file\n with open(os.path.join(mount_dir, 'etc/default/grub'), 'r') as file:\n text = file.read()\n\n # TODO; adjust text variable\n\n with open(os.path.join(mount_dir, 'etc/default/grub'), 'w') as file:\n file.write(text)\n\n grub_mkconfig_cmd = ['grub-mkconfig', '-o', '/boot/grub/grub.cfg']\n run_chroot_command(grub_mkconfig_cmd, mount_dir, arch)\n\n grub_install_cmd = [\n 'grub-install',\n f'--target={grub_target}',\n '--force-extra-removable',\n '--no-nvram',\n '--no-floppy',\n '--modules=\\\\\"part_msdos part_gpt\\\\\"',\n '--grub-mkdevicemap=/boot/grub/device.map',\n loop_device\n ]\n run_chroot_command(grub_install_cmd, mount_dir, arch)\n\n print(\"Updating /etc/hostname\")\n configure_hostname(mount_dir, 'foo')\n\n print(\"Updating /etc/fstab\")\n configure_fstab(mount_dir)\n\n print(\"Updating /etc/network/interfaces\")\n configure_network_interfaces(mount_dir)\n\n # TODO; run user provided script here...\n\n # Remove the binary interpreter from the chroot environment.\n if arch == 'armhf':\n print(\"Removing qemu-arm-static from the chroot environment\")\n os.remove(os.path.join(mount_dir, 'usr/bin/qemu-arm-static'))\n elif arch == 'arm64':\n print(\"Removing qemu-aarch64-static from the chroot environment\")\n os.remove(os.path.join(mount_dir, 'usr/bin/qemu-aarch64-static'))\n\n # Convert raw disk image to the requested format and and move the result to\n # the requested location.\n if image_format != 'raw':\n print(f\"Converting disk image to {image_format} format...\")\n\n new_disk_path = os.path.join(tmp_dir, 'disk.' 
+ image_format)\n subprocess.run([\n 'qemu-img', 'convert',\n '-f', 'raw',\n '-O', image_format,\n disk_path, new_disk_path\n ])\n\n disk_path = new_disk_path\n\n shutil.move(disk_path, output)\n\n print(\"Done!\")", "def run_impl(**kwargs: Any) -> None:\n try:\n config = configuration.create_transient_run_config(kwargs)\n except (\n configuration.ConfigFileOptionError,\n configuration.ConfigFileParsingError,\n configuration.CLIArgumentError,\n FileNotFoundError,\n ) as e:\n print(e, file=sys.stderr)\n sys.exit(1)\n\n store = image.ImageStore(\n backend_dir=config.image_backend, frontend_dir=config.image_frontend\n )\n trans = transient.TransientVm(config=config, store=store)\n\n try:\n trans.run()\n sys.exit(0)\n except utils.TransientProcessError as e:\n print(e, file=sys.stderr)\n sys.exit(e.returncode)", "def magick(inp, out):\n\n subprocess.call([\"magick\", inp, out], shell=os.name == \"nt\")", "def magick(inp, out):\n\n subprocess.call([\"magick\", inp, out], shell=os.name == \"nt\")", "def qemu_io_image_args(config, dev=False):\n\n if dev:\n return [\n \"--image-opts\",\n \"driver=file,filename=%s\" % config.device_path()]\n else:\n return [\n \"--object\",\n (\"secret,id=sec0,data=%s,format=base64\" %\n config.first_password_base64()),\n \"--image-opts\",\n (\"driver=luks,key-secret=sec0,file.filename=%s\" %\n config.image_path())]", "def _execute_container(self):\n pass", "def _mountiso(self, container_imagepath):\n (isomount, squashfs) = utils.get_iso_and_squashfs(container_imagepath)\n if isomount is None or squashfs is None:\n shutil.rmtree(self.containerpath)\n raise ContainerError(\"Couldn't mount or extract squashfs from {}\".format(container_imagepath))\n\n self.config.isomount = isomount\n self.config.squashfs = squashfs\n self.config.image = os.path.basename(container_imagepath)\n\n logger.debug(\"selected iso is {}, and squashfs is: {}\".format(self.config.isomount,\n self.config.squashfs))", "def add_input_image(parser, output=False):\n parser.add_argument('input', help='input image file (including .npy, .npy.gz, and '\n 'directories/wildcard names for 3D images)')\n parser.add_argument('--float', '-f', action='store_true', help='convert image to float')\n parser.add_argument('--gpu', '-g', action='store_true', help='utilize the GPU when able')", "def run(self, data):\n required = {'token', 'image_id', 'script'}\n api.validate(data, required)\n token = data['token']\n image_id = data['image_id']\n script = data['script']\n detach = data.get('detach', False)\n if not detach:\n detach = False\n host_dir = data.get('host_dir', None)\n docker_dir = data.get('docker_dir', None)\n working_dir = data.get('working_dir', None)\n # cgroup = data.get('cgroup', None)\n # TODO(jorgesece): control image private\n# credentials_module.authorize_image(\n # token,\n # image_id\n # )\n if host_dir:\n self.credentials_module.authorize_directory(token, host_dir)\n job_info = self.credentials_module.get_job_from_token(token)\n cgroup_parent = job_info.get('cgroup', None)\n container_id = self.docker_module.run_container(\n image_id,\n detach,\n script,\n host_dir=host_dir,\n docker_dir=docker_dir,\n working_dir=working_dir,\n cgroup=cgroup_parent\n )\n self.credentials_module.add_container(token, container_id)\n self.docker_module.start_container(container_id)\n if not detach:\n results = self.docker_module.logs_container(container_id)\n else:\n results = container_id\n return results", "def call_unigen_docker(input_file: Path, sample_count: int) -> Tuple[CompletedProcess, str]:\n 
unigen_container = 'msoos/unigen'\n input_bytes = input_file.read_bytes()\n # args = shell_split(\"--rm -i -a stdin -a stdout --samples=\"+str(sample_count))\n args = shell_split(\"--rm -i -a stdin -a stdout\")\n result = docker_run(unigen_container, args, input_bytes)\n return (result, \"\")", "def flatten_image(image, dest_image=None, no_op_cmd='/bin/true', create_kwargs={}, start_kwargs={}):\n dest_image = dest_image or image\n with temp_container(image, no_op_cmd=no_op_cmd, create_kwargs=create_kwargs, start_kwargs=start_kwargs) as c:\n run('docker export {0} | docker import - {1}'.format(c, dest_image), shell=False)" ]
[ "0.62837875", "0.6074666", "0.60684544", "0.60363144", "0.59255517", "0.586734", "0.5778542", "0.57056147", "0.5665832", "0.56376344", "0.56340975", "0.56335706", "0.55957174", "0.5517197", "0.54937905", "0.54879", "0.54570025", "0.5440607", "0.54342204", "0.5405311", "0.53974074", "0.53341305", "0.53341305", "0.53308415", "0.5324367", "0.5319903", "0.53050876", "0.5279464", "0.52782", "0.5276549" ]
0.74309325
0
Execute qemu-img show inside a container, directly mapping the volume
def show(vol_path): name = "qemu-img" image = "breqwatr/qemu-img:latest" path = Path(vol_path) vol_abspath = path.absolute().__str__() run = f"qemu-img info {vol_abspath}" mount = f"-v {vol_abspath}:{vol_abspath}" cmd = f"docker run --rm -it --name {name} {mount} {image} {run}" shell(cmd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_show_container(self):\n pass", "def cli(context):\n dev = f\"docker run -it -v {PWD}:/local {IMAGE_NAME}:{IMAGE_VER} /bin/bash\"\n print(f\"{dev}\")\n context.run(f\"{dev}\", pty=True)", "def flash_image(disk_image, device):\n cmd = 'dd if={disk_image} | pv | sudo dd of={device_path}'\n\n populated_cmd = cmd.format(\n disk_image=disk_image,\n device_path=device.path)\n\n # why check output? because then you can do the cool\n # dd | pv | dd trick. '|pv|'' is awesome stdout.\n output = check_output(populated_cmd, shell=True)\n print(output)", "def show_volume(self, volume, check=True):\n cmd = 'cinder show ' + volume.id\n\n exit_code, stdout, stderr = self.execute_command(\n cmd, timeout=config.VOLUME_SHOW_TIMEOUT, check=check)\n\n volume_table = output_parser.table(stdout)\n show_result = {key: value for key, value in volume_table['values']}\n\n if check:\n assert_that(show_result['id'], is_(volume.id))\n if volume.name:\n assert_that(show_result['name'], is_(volume.name))\n if volume.description:\n assert_that(show_result['description'],\n is_(volume.description))", "def cat_cmd(server, client, line):\n if len(line.split(' ')) > 1 and line.split(' ')[1] == \"/proc/mounts\":\n path = os.path.dirname(os.path.realpath(__file__))\n path = path[:-7] # shaves off /engine\n with open(\"{}/fakefiles/proc%mounts\".format(path), \"r\") as f:\n response = f.read()\n client.exit_status = 0\n else:\n response = client.run_in_container(line)\n client.send(response)", "def start_qemu_flash(latest_vexpress_nor):\n\n print(\"qemu raw flash with image {}\".format(latest_vexpress_nor))\n\n # make a temp file, make sure that it has .vexpress-nor suffix, so that\n # mender-qemu will know how to handle it\n fh, img_path = tempfile.mkstemp(suffix=\".vexpress-nor\", prefix=\"test-image\")\n # don't need an open fd to temp file\n os.close(fh)\n\n # vexpress-nor is more complex than sdimg, inside it's compose of 2 raw\n # files that represent 2 separate flash banks (and each file is a 'drive'\n # passed to qemu). Because of this, we cannot directly apply qemu-img and\n # create a qcow2 image with backing file. 
Instead make a disposable copy of\n # flash image file.\n shutil.copyfile(latest_vexpress_nor, img_path)\n\n qenv = {}\n # pass QEMU drive directly\n qenv[\"VEXPRESS_IMG\"] = img_path\n qenv[\"MACHINE\"] = \"vexpress-qemu-flash\"\n\n try:\n qemu = start_qemu(qenv)\n except:\n os.remove(img_path)\n raise\n\n return qemu, img_path", "def map_volume(self, initiator, volume_name):\n hostID = self.get_host_id(initiator)\n uid = self.get_uid(volume_name)\n volInfo = self.get_volume_info(uid)\n volID = volInfo.get(SVC_KEY_VDISK_ID)\n\n cmd = \"svctask mkvdiskhostmap -host %s -force %s\" % (hostID, volID)\n self._svc_command(cmd)", "def loopattach(diskimg):\n result = subprocess.run(['losetup', '--find', diskimg], check=True)\n return loopdev(diskimg)", "def test_view_volume(self, volume, volumes_steps):\n volumes_steps.view_volume(volume.name)", "def MakeVmCommand(image, memory, snapshot):\n\n cmd = ['qemu-system-x86_64',\n '-hda', image,\n '--enable-kvm',\n '-m', str(memory)]\n # Concatenate any additional options.\n if snapshot:\n cmd += ['-snapshot']\n return cmd", "def start_qemu_sdimg(latest_sdimg):\n fh, img_path = tempfile.mkstemp(suffix=\".sdimg\", prefix=\"test-image\")\n # don't need an open fd to temp file\n os.close(fh)\n\n # Make a disposable image.\n shutil.copy(latest_sdimg, img_path)\n\n # pass QEMU drive directly\n qenv = {}\n qenv[\"VEXPRESS_IMG\"] = img_path\n qenv[\"MACHNE\"] = \"vexpress-qemu\"\n\n try:\n qemu = start_qemu(qenv)\n except:\n os.remove(img_path)\n raise\n\n return qemu, img_path", "def makeVolumeFromImage(self , imageid , initialconfig, instancename):\n self.initCreate(initialconfig)\n disk = self.createDisk(instancename)\n self.attachDiskToMinipad(disk )\n \n if self.startConversion(imageid , self.__server_ip , \"ImportVolume\") == False:\n return None\n\n self.detachDiskFromMinipad(disk)\n return str(disk)", "def qemu_img_info(path):\n if not os.path.exists(path):\n return QemuImgInfo()\n\n out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',\n 'qemu-img', 'info', path)\n return QemuImgInfo(out)", "def step_show(test, checks=None):\n if checks is None:\n checks = []\n test.cmd(\n \"az networkcloud virtualmachine console show \"\n \"--resource-group {resourceGroup} --virtual-machine-name {virtualMachineName}\",\n checks=checks,\n )", "def executeShow(self,\n rsrcType,\n showAdditionalParams=[],\n rsrcAdditionalParams=[]):\n\n args = [\"show\",\n \"--wavefrontHost\", util.wavefrontHostName,\n \"--apiToken\", util.wavefrontApiToken] \\\n + showAdditionalParams \\\n + [rsrcType] \\\n + rsrcAdditionalParams\n wc = wavectl.Wavectl(designForTestArgv=args)\n\n with util.StdoutCapture() as captOut:\n wc.runCmd()\n\n return captOut.str()", "def _start_instance(self, resource_handler):\n log.debug('Starting container')\n cli = resource_handler.cli\n #host_config=cli.create_host_config(network_mode=self.network_mode)\n container = cli.create_container(\n image='{0.image}:{0.tag}'.format(self),\n command=self.command,\n #host_config=host_config,\n environment=self.env\n )\n\n cli.start(container.get('Id'))\n log.debug('Started container [%s]', container)\n return str(container)", "def update_volumes():\n print 'do something useful here'", "def dicom_cli():", "def do_show(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['all_projects'] = args.all_projects\n opts = zun_utils.remove_null_parms(**opts)\n container = cs.containers.get(**opts)\n if args.format == 'json':\n print(jsonutils.dumps(container._info, indent=4, sort_keys=True))\n elif args.format == 
'yaml':\n print(yaml.safe_dump(container._info, default_flow_style=False))\n elif args.format == 'table':\n _show_container(container)", "def volume(name, map, ramp=\"rainbow2\"):\r\n return f'\\ncmd.volume(name=\"{name}\", map=\"{map}\", ramp=\"{ramp}\")\\n'", "def _exec_command_in_container(client, container, command):\n exec_id = client.exec_create(container, command)\n output = client.exec_start(exec_id).decode('utf-8')\n logger.info(output)\n return output", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))", "def get_details_using_inspect_command(self, container_id):\n\n try:\n p = Popen(DOCKER_INSPECT_CMD.format(container_id), shell=True, stdout=PIPE, stderr=PIPE)\n data_dump, stderr_data = p.communicate()\n log.debug('{}[*]{} Inspect result:{}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR, \n json.dumps(json.loads(data_dump)),indent=4))\n\n except Exception as e:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR, e))\n return False\n\n self.data = json.loads(data_dump.decode('utf-8'))\n\n if not self.data:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR,\n 'Please check if container id is valid'))\n return False\n\n self.storage_driver = self.data[0]['Driver']\n self.pid = self.data[0]['State']['Pid']\n self.container_id = self.data[0]['Id']\n\n log.debug('{}[*]{} Storage Driver: {}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR, self.storage_driver))\n if self.storage_driver == 'overlay2' or self.storage_driver == 'overlay':\n self.IS_OVERLAYFS = True\n self.overlay_upperdir_path = self.data[0]['GraphDriver']['Data']['UpperDir']\n self.overlay_merged_path = self.data[0]['GraphDriver']['Data']['MergedDir']\n elif self.storage_driver == 'aufs':\n self.IS_AUFSFS = True\n self.aufs_container_layerdb_path = AUFS_IMAGE_LAYERDB_PATH + self.data[0]['Id']\n else:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR,\n 'This storage driver does not support'))\n False\n\n log.debug('{}[*]{} Container id: {}'.format(DFbase.LOG_DEBUG_COLOR,\n DFbase.LOG_INFO_COLOR, self.container_id))\n return True", "def raw_image(self):\n\t\treturn FstabEntry([f\"{self.mount_point}_image\", \"emmc\", self.device])", "def cmd_CONTAINER(self, line):\r\n config = ContainerOptions(self.terminal)\r\n\r\n try:\r\n config.parseOptions(line)\r\n cmd = config.subCommand\r\n opts = config.subOptions if hasattr(config, 'subOptions') else {}\r\n except usage.UsageError as errortext:\r\n self.terminal.write(\"BUG in usage: {0}\".format(errortext))\r\n else:\r\n if cmd == 'start':\r\n if (opts['name']):\r\n data = {}\r\n if opts.get('group'):\r\n data['group'] = opts['group']\r\n if opts.get('groupIp'):\r\n data['groupIp'] = opts['groupIp']\r\n if opts.get('size'):\r\n data['size'] = opts['size']\r\n if opts.get('bandwidth'):\r\n data['bandwidth'] = opts['bandwidth']\r\n if opts.get('memory'):\r\n data['memory'] = opts['memory']\r\n if opts.get('specialopts'):\r\n data['specialFeatures'] = opts['specialopts']\r\n self.callToUser('createContainer', 'robot', opts['name'],\r\n data)\r\n\r\n elif config['stop']:\r\n self.callToUser('destroyContainer', 'robot', config['stop'])\r\n elif config['services']:\r\n self.callToRosProxy('services', config['services'])\r\n elif config['topics']:\r\n 
self.callToRosProxy('topics', config['topics'])\r\n elif config['list']:\r\n self.callToUserAndDisplay('list_containers', 'console')\r\n elif config['username']:\r\n self.callToUserAndDisplay('list_containers_by_user', 'admin',\r\n config['username'])", "def run_image_viewer( self ):\n\n # XXX: hardcoded program name and image size.\n subprocess.Popen( [\"feh\", \"-dZ\", \"-g\", \"800x600\", self.record[\"filename\"]] )", "def display_eink(image):\n if epd:\n epd.display(epd.getbuffer(image))\n else:\n image.show()", "def test_disk(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"file1\"\n self.command.run()\n self.command.finished()\n self.check_diff(\"\"\"\n <ovf:References>\n- <ovf:File ovf:href=\"input.vmdk\" ovf:id=\"file1\" ovf:size=\"{vmdk_size}\" />\n <ovf:File ovf:href=\"input.iso\" ovf:id=\"file2\" ovf:size=\"{iso_size}\" />\n...\n <ovf:Info>Virtual disk information</ovf:Info>\n- <ovf:Disk ovf:capacity=\"1\" ovf:capacityAllocationUnits=\"byte * 2^30\" \\\novf:diskId=\"vmdisk1\" ovf:fileRef=\"file1\" ovf:format=\"http://www.vmware.com/\\\ninterfaces/specifications/vmdk.html#streamOptimized\" />\n </ovf:DiskSection>\n...\n <rasd:AddressOnParent>0</rasd:AddressOnParent>\n- <rasd:ElementName>Hard Drive</rasd:ElementName>\n- <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>\n- <rasd:InstanceID>6</rasd:InstanceID>\n- <rasd:Parent>3</rasd:Parent>\n- <rasd:ResourceType>17</rasd:ResourceType>\n- </ovf:Item>\n- <ovf:Item>\n- <rasd:AddressOnParent>0</rasd:AddressOnParent>\n <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>\n\"\"\".format(vmdk_size=self.FILE_SIZE['input.vmdk'],\n iso_size=self.FILE_SIZE['input.iso']))\n self.assertFalse(os.path.exists(os.path.join(self.temp_dir,\n \"input.vmdk\")),\n \"deleted file should not be exported\")", "def test_finish_resize_with_volumes(self):\n\n # create instance\n instance = self._create_fake_instance_obj()\n request_spec = objects.RequestSpec()\n\n # create volume\n volume = {'instance_uuid': None,\n 'device_name': None,\n 'id': uuids.volume,\n 'size': 200,\n 'attach_status': 'detached'}\n bdm = objects.BlockDeviceMapping(\n **{'context': self.context,\n 'source_type': 'volume',\n 'destination_type': 'volume',\n 'volume_id': uuids.volume,\n 'instance_uuid': instance['uuid'],\n 'device_name': '/dev/vdc'})\n bdm.create()\n\n # stub out volume attach\n def fake_volume_get(self, context, volume_id, microversion=None):\n return volume\n self.stub_out('nova.volume.cinder.API.get', fake_volume_get)\n\n def fake_volume_check_availability_zone(self, context,\n volume_id, instance):\n pass\n self.stub_out('nova.volume.cinder.API.check_availability_zone',\n fake_volume_check_availability_zone)\n\n def fake_get_volume_encryption_metadata(self, context, volume_id):\n return {}\n self.stub_out('nova.volume.cinder.API.get_volume_encryption_metadata',\n fake_get_volume_encryption_metadata)\n\n orig_connection_data = {\n 'target_discovered': True,\n 'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % uuids.volume_id,\n 'target_portal': '127.0.0.0.1:3260',\n 'volume_id': uuids.volume_id,\n }\n connection_info = {\n 'driver_volume_type': 'iscsi',\n 'data': orig_connection_data,\n }\n\n def fake_init_conn(self, context, volume_id, session):\n return connection_info\n self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn)\n\n def fake_attach(self, context, volume_id, instance_uuid, device_name,\n mode='rw'):\n volume['instance_uuid'] = instance_uuid\n volume['device_name'] = device_name\n 
self.stub_out('nova.volume.cinder.API.attach', fake_attach)\n\n # stub out virt driver attach\n def fake_get_volume_connector(*args, **kwargs):\n return {}\n self.stub_out('nova.virt.fake.FakeDriver.get_volume_connector',\n fake_get_volume_connector)\n\n def fake_attach_volume(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.attach_volume',\n fake_attach_volume)\n\n # attach volume to instance\n self.compute.attach_volume(self.context, instance, bdm)\n\n # assert volume attached correctly\n self.assertEqual(volume['device_name'], '/dev/vdc')\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['device_name'], volume['device_name'])\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # begin resize\n flavor = self.default_flavor\n instance.task_state = task_states.RESIZE_PREP\n instance.save()\n self.compute.prep_resize(self.context, instance=instance,\n flavor=flavor,\n image={}, request_spec=request_spec,\n filter_properties={}, node=None,\n clean_shutdown=True, migration=None,\n host_list=[])\n\n # fake out detach for prep_resize (and later terminate)\n def fake_terminate_connection(self, context, volume, connector):\n connection_info['data'] = None\n self.stub_out('nova.volume.cinder.API.terminate_connection',\n fake_terminate_connection)\n\n migration = objects.Migration.get_by_instance_and_status(\n self.context.elevated(),\n instance.uuid, 'pre-migrating')\n self.compute.resize_instance(self.context, instance=instance,\n migration=migration, image={},\n # TODO(stephenfin): Why a JSON string?\n flavor=jsonutils.to_primitive(flavor),\n clean_shutdown=True, request_spec=request_spec)\n\n # assert bdm is unchanged\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['device_name'], volume['device_name'])\n cached_connection_info = jsonutils.loads(bdm['connection_info'])\n self.assertEqual(cached_connection_info['data'],\n orig_connection_data)\n # but connection was terminated\n self.assertIsNone(connection_info['data'])\n\n # stub out virt driver finish_migration\n def fake(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)\n\n instance.task_state = task_states.RESIZE_MIGRATED\n instance.save()\n\n # new initialize connection\n new_connection_data = dict(orig_connection_data)\n new_iqn = 'iqn.2010-10.org.openstack:%s.2' % uuids.volume_id,\n new_connection_data['target_iqn'] = new_iqn\n\n def fake_init_conn_with_data(self, context, volume, session):\n connection_info['data'] = new_connection_data\n return connection_info\n self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn_with_data)\n\n self.compute.finish_resize(self.context,\n migration=migration,\n disk_info={}, image={}, instance=instance,\n request_spec=request_spec)\n\n # assert volume attached correctly\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance['uuid'])\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # stub out detach\n def fake_detach(self, context, volume_uuid):\n volume['device_path'] = None\n volume['instance_uuid'] = None\n self.stub_out('nova.volume.cinder.API.detach', fake_detach)\n\n # clean up\n 
self.compute.terminate_instance(self.context, instance, [])", "def convert(input_format, output_format, input_path, output_path):\n # mount the input file to /work/<filename> inside the container\n path_in = Path(input_path)\n input_abspath = path_in.absolute().__str__()\n assert_path_exists(input_abspath)\n internal_input_path = f\"/input/{path_in.name}\"\n in_mount = f\"-v {input_abspath}:{internal_input_path}\"\n # The mount for the output dir varies depending on if its a file or block device\n path_out = Path(output_path)\n if path_out.is_block_device():\n # directly map the block device to the container\n assert_path_exists(path_out)\n out_mount = f\"--device {path_out}\"\n internal_output_path = path_out\n else:\n # output is a file (or about to be), so mount the folder it exists in\n output_abspath = path_out.absolute().__str__()\n output_dir = Path(output_abspath).parent.__str__()\n assert_path_exists(output_dir)\n internal_output_dir = \"/output\"\n internal_output_path = f\"{internal_output_dir}/{path_out.name}\"\n out_mount = f\"-v {output_dir}:{internal_output_dir}\"\n name = \"qemu-img\"\n image = \"breqwatr/qemu-img:latest\"\n run = (\n f\"qemu-img convert -f {input_format} -O {output_format} \"\n f\"{internal_input_path} {internal_output_path}\"\n )\n cmd = f\"docker run -it --name {name} --rm {in_mount} {out_mount} {image} {run}\"\n shell(cmd)" ]
[ "0.62484497", "0.60517263", "0.5992571", "0.5975449", "0.5839048", "0.57297766", "0.5697807", "0.5689689", "0.56495243", "0.5641624", "0.55717117", "0.5554981", "0.5520627", "0.5518573", "0.5513627", "0.55032563", "0.5500508", "0.5494928", "0.5483127", "0.54741514", "0.5453862", "0.54325974", "0.540865", "0.53809166", "0.53717625", "0.53702945", "0.53695697", "0.5343662", "0.53416663", "0.5340691" ]
0.7928166
0
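A minimal standalone sketch of the pattern described in the record above — run qemu-img info against a host path by bind-mounting it into a short-lived container. The helper name, the default image tag (taken from the record's document), and the example path are assumptions for illustration, not part of the dataset.

import subprocess
from pathlib import Path

def qemu_img_info_in_container(vol_path: str, image: str = "breqwatr/qemu-img:latest") -> str:
    # Resolve the host path and map it 1:1 into the container so qemu-img
    # sees the same absolute path it would see on the host.
    vol_abspath = str(Path(vol_path).absolute())
    cmd = [
        "docker", "run", "--rm",
        "-v", f"{vol_abspath}:{vol_abspath}",
        image,
        "qemu-img", "info", vol_abspath,
    ]
    # check=True raises CalledProcessError if docker or qemu-img exits non-zero.
    return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout

# Usage (assumes Docker is installed and the image can be pulled):
# print(qemu_img_info_in_container("/var/lib/images/disk.qcow2"))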
Test if the artifact is not in the handler's allowlist
def test_allow_build_not_in_allowlist(self): handler = MyHandler() container = {"name": "test", "branch": "branch"} allow = handler.allow_build(ArtifactType.IMAGE, name=container["name"], branch=container["branch"]) assert not allow
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_allowlist_not_overwritten(self):\n handler = MyHandler()\n handler.name = \"RebuildImagesOnImageAdvisoryChange\"\n allowed = handler.allow_build(\n ArtifactType.IMAGE, advisory_state=\"SHIPPED_LIVE\")\n self.assertTrue(allowed)\n\n handler.name = \"foo\"\n allowed = handler.allow_build(\n ArtifactType.IMAGE, advisory_state=\"SHIPPED_LIVE\")\n self.assertFalse(allowed)", "def test_allow_build_in_allowlist(self):\n handler = MyHandler()\n container = {\"name\": \"test\", \"branch\": \"branch\"}\n\n allow = handler.allow_build(ArtifactType.IMAGE,\n name=container[\"name\"],\n branch=container[\"branch\"])\n assert allow", "def _is_allowed(self, i):\n x = re.search(r\"src\\/backend\\/(.*)\\/\", self.filename)\n if not x:\n return True\n\n service = x.group(1).split(\"/\")[0]\n\n frm, imp, _ = i\n if frm == [\"backend\"]:\n return False\n if frm and frm[0] == \"backend\" and frm[1] not in {service, \"common\"}:\n return False\n return True", "def handle_disablehandler(bot, ievent):\n if not httpd:\n ievent.reply('webserver is not running')\n return\n try:\n handler = ievent.args[0]\n except IndexError:\n ievent.missing('<handler>')\n return\n try:\n del httpd.handlers[handler]\n if handler in cfg.get('showplugs'):\n cfg.remove('showplugs', handler)\n if handler not in cfg.get('denyplugs'):\n cfg.append('denyplugs', handler)\n ievent.reply('%s handler disabled' % handler)\n except KeyError:\n ievent.reply('%s handler is not enabled' % handler)", "def check_blacklist(repo):\n blacklisted = [ # NOTE: keep this list up to date!\n 'builder', 'cache', 'controller', 'database', 'logger', 'logspout',\n 'publisher', 'registry', 'router', 'store-admin', 'store-daemon',\n 'store-gateway', 'store-metadata', 'store-monitor', 'swarm', 'mesos-master',\n 'mesos-marathon', 'mesos-slave', 'zookeeper',\n ]\n if any(\"deis/{}\".format(c) in repo for c in blacklisted):\n raise PermissionDenied(\"Repository name {} is not allowed\".format(repo))", "def allowed_image(self, module_id):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tself.log(\"In allowed_image: \" + module_id,level=logging.DEBUG)\n\t\tcfg = self.cfg\n\t\tif self.build['ignoreimage']:\n\t\t\tself.log(\"ignoreimage == true, returning true\" + module_id,level=logging.DEBUG)\n\t\t\treturn True\n\t\tself.log(str(cfg[module_id]['shutit.core.module.allowed_images']),level=logging.DEBUG)\n\t\tif cfg[module_id]['shutit.core.module.allowed_images']:\n\t\t\t# Try allowed images as regexps\n\t\t\tfor regexp in cfg[module_id]['shutit.core.module.allowed_images']:\n\t\t\t\tif not shutit_util.check_regexp(regexp):\n\t\t\t\t\tself.fail('Illegal regexp found in allowed_images: ' + regexp) # pragma: no cover\n\t\t\t\tif re.match('^' + regexp + '$', self.target['docker_image']):\n\t\t\t\t\treturn True\n\t\treturn False", "def test_doesnt_implement_can_handle(self):\r\n self.assertRaises(NotImplementedError, Importer.can_handle, \"\")", "def handle_webdefaultdeny(bot, ievent):\n cfg.set('whitelistenable', 1)\n ievent.reply('ok')", "def test_only_add_perm(self):\n self.assertStatusCode(self.url, 403)", "def is_forbidden(self, request):\n return common.get_extension(str(request.url().toString())) in self.banned_extensions", "def test_itar_restrict_software_asset(self):\n pass", "def handle_webdefaultallow(bot, ievent):\n cfg.set('whitelistenable', 0)\n ievent.reply('ok')", "def test_itar_restrict_asset(self):\n pass", "def _check_file_not_used(self):\n module_files = set(self._get_module_files())\n referenced_files = 
set(self._get_manifest_referenced_files()).union(\n set(self._get_xml_referenced_files())\n )\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [\n f for f in (module_files - referenced_files)\n if f.split(os.path.sep)[0] not in excluded_dirs\n ]\n self.msg_args = no_referenced_files\n return not no_referenced_files", "def test_allow(self) -> None:\n response = self.request(\"/\", method=\"HEAD\")\n self.assert_allowed(response, (\"GET\", \"POST\"))", "def fix_has_no_advisory(self):\n fixed_in = self.fixed_artifact()\n return fixed_in and fixed_in.vendor_no_advisory", "def test_no_permission(client, mocker):\n mocker.patch(\n \"ecommerce.views.IsSignedByCyberSource.has_permission\", return_value=False\n )\n resp = client.post(reverse(\"order-fulfillment\"), data={})\n assert resp.status_code == statuses.HTTP_403_FORBIDDEN", "def supports_rescoring(self):\r\n return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())", "def test_noFilesFromAccept(self):\n return self._acceptFailureTest(ENFILE)", "def payload_is_handleable(self, payload):\n\t\tif payload.get_filename():\n\t\t\treturn True\n\t\treturn False", "def test_check_exclude_none(self):\n\n self.assertTrue(PostfixExclude([]).check(self.file_gitignore))\n self.assertTrue(PostfixExclude([]).check(self.file_py))\n self.assertTrue(PostfixExclude([]).check(self.file_authors))\n self.assertTrue(PostfixExclude([]).check(self.file__init__))\n self.assertTrue(PostfixExclude([]).check(self.file_bin))", "async def test_denylist(hass, mock_client):\n handler_method = await _setup(\n hass,\n {\n \"exclude_domains\": [\"fake\"],\n \"exclude_entity_globs\": [\"test.excluded_*\"],\n \"exclude_entities\": [\"not_real.excluded\"],\n },\n )\n\n tests = [\n FilterTest(\"fake.excluded\", False),\n FilterTest(\"light.included\", True),\n FilterTest(\"test.excluded_test\", False),\n FilterTest(\"test.included_test\", True),\n FilterTest(\"not_real.included\", True),\n FilterTest(\"not_real.excluded\", False),\n ]\n\n for test in tests:\n event = make_event(test.id)\n handler_method(event)\n\n was_called = mock_client.labels.call_count == 1\n assert test.should_pass == was_called\n mock_client.labels.reset_mock()", "def not_supported(cls, website):\n if website in cls.urls.keys():\n return False\n else:\n return True", "def _is_file_handler(settings):\n FILE_HANDLERS = [\n \"logging.FileHandler\",\n \"logging.handlers.WatchedFileHanlder\",\n \"logging.handlers.BaseRotatingHandler\",\n \"logging.handlers.RotatingFileHandler\",\n \"logging.handlers.TimedRotatingFileHandler\",\n ]\n\n if settings[\"class\"] in FILE_HANDLERS:\n return True\n return False", "def __is_item_vulnerable(self, queue_item):\n\n try:\n HTTPHandler(None, queue_item)\n except Exception:\n return False\n\n if not \"html\" in queue_item.response.headers.get(\"content-type\"):\n return False\n\n if not queue_item.get_soup_response():\n return False\n\n if not self.__should_payload_execute(queue_item):\n return False\n\n if self.__verify_payload:\n if not self.__verify_queue_item(queue_item.verify_item):\n return False\n\n return True", "def collect_allowed(message):\n return True", "def acceptDrops(self) -> bool:\n ...", "def checkImport(self):\r\n for imp in self.cap_file.Import.packages:\r\n if a2s(imp.aid) not in export_refs:\r\n return False\r\n return True", "def _is_always_unsatisfied(self):\n # If this is a github sha tarball, then it is always unsatisfied\n # because the url has a commit sha in it and 
not the version\n # number.\n url = self._req.url\n if url:\n filename = filename_from_url(url)\n if filename.endswith(ARCHIVE_EXTENSIONS):\n filename, ext = splitext(filename)\n if is_git_sha(filename):\n return True\n return False", "def can_grab(self, thing):\n return False" ]
[ "0.6424493", "0.5595852", "0.5518085", "0.5322846", "0.5233636", "0.52273697", "0.520941", "0.520649", "0.5168272", "0.5164868", "0.51314855", "0.51312107", "0.50916404", "0.50706255", "0.50691235", "0.49980858", "0.49586055", "0.4928604", "0.49190104", "0.49136826", "0.4904608", "0.49017468", "0.4901031", "0.48895338", "0.48814324", "0.4875488", "0.48753092", "0.48747215", "0.48712847", "0.48559442" ]
0.68936956
0
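A minimal sketch of the allow/deny pattern the test in the record above exercises: a handler permits a build only when the artifact type and attributes match an allowlist entry. The class and field names here are hypothetical stand-ins, not the project's real API.

class AllowlistedHandler:
    # Maps artifact type -> attribute patterns that are permitted to build.
    allowlist = {
        "image": [{"name": "allowed-container", "branch": "main"}],
    }

    def allow_build(self, artifact_type: str, **attrs) -> bool:
        rules = self.allowlist.get(artifact_type, [])
        # Allowed only if every attribute of some rule matches the request.
        return any(all(attrs.get(k) == v for k, v in rule.items()) for rule in rules)

handler = AllowlistedHandler()
assert not handler.allow_build("image", name="test", branch="branch")  # not in the allowlist
assert handler.allow_build("image", name="allowed-container", branch="main")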
If there is a regex error, the method will raise an UnprocessableEntity error
def test_allow_build_regex_exception(self): handler = MyHandler() container = {"name": "test", "branch": "branch"} with self.assertRaises(UnprocessableEntity): handler.allow_build(ArtifactType.IMAGE, name=container["name"], branch=container["branch"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validator_regex(self, field, value):\n try:\n re.compile(value)\n except re.error:\n self._error(field, \"{} is not a valid regex\".format(value))", "def _process_not_ok_response(content, status):\n if status == codes.bad:\n length = len(content)\n err_msg = (content if length > 0 else str(status))\n raise NoSQLException('Error response: ' + err_msg)\n raise NoSQLException('Error response = ' + str(status))", "def unprocessable_entity(error):\n return jsonify({\n 'success': False,\n 'error': STATUS_UNPROCESSABLE_ENTITY,\n 'message': ERROR_MESSAGES[STATUS_UNPROCESSABLE_ENTITY]\n }), STATUS_UNPROCESSABLE_ENTITY", "def unexpected_error(self, exception):", "def test_validate_and_write_error_pattern_raises(req):\n handle = StringIO()\n req.get('http://fake/', text=u'ID list is empty')\n r = requests.get('http://fake/')\n config = core.Config()\n\n with pytest.raises(BadPatternError):\n core._validate_and_write(r, handle, 'FAKE', config)\n\n req.get('http://fake/', text=u'Error: CEFetchPApplication::proxy_stream(): Failed to retrieve sequence: NC_405534')\n r = requests.get('http://fake/')\n with pytest.raises(BadPatternError):\n core._validate_and_write(r, handle, 'FAKE', config)", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def _process_error(self, result):\n self.error = result\n if result['errorCode'] == 901:\n raise Exceptions.APIKeyInvalid\n elif result['errorCode'] == 902:\n raise Exceptions.APISecretInvalid\n elif result['errorCode'] == 903:\n raise Exceptions.InvalidRequestToken\n elif result['errorCode'] == 904:\n raise Exceptions.RequestTokenExpired\n elif result['errorCode'] == 905:\n raise Exceptions.InvalidAccessToken\n elif result['errorCode'] == 906:\n raise Exceptions.TokenExpired(self.access.expire)\n elif result['errorCode'] == 907:\n raise Exceptions.ParameterMissing\n elif result['errorCode'] == 908:\n raise Exceptions.ParameterNotFormatted\n elif result['errorCode'] == 909:\n raise Exceptions.FeatureNotSupported\n elif result['errorCode'] == 910:\n raise Exceptions.EndPointNotSupported\n else:\n raise Exceptions.UnknownJsonError(result)", "def unprocessable_entity(error): # pylint: disable=unused-argument\n response = jsonify(\n {\n \"success\": False,\n \"error_code\": 422,\n \"message\": \"Unprocessable Entity\",\n }\n )\n return response, 422", "def un_processable_422(error):\n return jsonify({\n 'success': False,\n 'message': 'request cannot be processed',\n 'error': 422\n }), 422", "def test_regex_constraint(self):\n from petstore_api.model import apple\n\n # Test with valid regex pattern.\n inst = apple.Apple(\n cultivar=\"Akane\"\n )\n assert isinstance(inst, apple.Apple)\n\n inst = apple.Apple(\n cultivar=\"Golden Delicious\",\n origin=\"cHiLe\"\n )\n assert isinstance(inst, apple.Apple)\n\n # Test with invalid regex pattern.\n err_regex = r\"Invalid value `.+?`, must match regular expression `.+?` at \\('args\\[0\\]', 'cultivar'\\)\"\n with self.assertRaisesRegex(\n petstore_api.ApiValueError,\n err_regex\n ):\n inst = apple.Apple(\n cultivar=\"!@#%@$#Akane\"\n )\n\n err_regex = r\"Invalid value `.+?`, must match regular expression `.+?` at \\('args\\[0\\]', 'origin'\\)\"\n with self.assertRaisesRegex(\n petstore_api.ApiValueError,\n err_regex\n ):\n inst = apple.Apple(\n cultivar=\"Golden Delicious\",\n origin=\"!@#%@$#Chile\"\n )", "def handle_error_cleaning_pipeline(raw_data, endpoint, 
endpoint_params):\n\n try:\n result = clean_pipeline(raw_data, endpoint, endpoint_params)\n except Exception as e:\n print(f'Error! data doesn\\'t exist while cleansing proccess running!')\n print(e)\n sys.exit(1)\n\n return result", "def test_raises_useful_exception(self):\n exp = Expression(r'inalid (\\d]', {}, [], lambda x: x)\n with self.assertRaises(exp.InvalidPattern):\n assert not exp.pattern", "def __call__(self, value):\n valid = True\n for regex in self.regexs:\n search = regex.search(value)\n valid = valid and ( search != None)\n if not valid or len(value) < self.min_length:\n raise ValidationError(self.message, code=self.code)", "def handle_marshmallow_validaton(err): # except ValidationError as err\n return jsonify(err.messages), 400 # bad request", "def _handle_code_400(text):\n return '; server response: ' + text", "def findParsingFailure(self, s):\n\n rest = s\n matches = []\n for i in range(len(self.reParts)):\n thisre = '\\s*' + self.reParts[i] + '(.*)'\n m = re.match(thisre, rest, re.VERBOSE|re.IGNORECASE)\n if not m:\n if i == 0:\n dtype = self.name\n else:\n dtype = self.dtypes[i-1][0]\n raise RuntimeError('Cannot parse field %d (%s) at: %s; previous matches: %s' % (i, dtype, rest, ';'.join(matches)))\n newRest = m.groups()[-1]\n matchedText = rest[:-len(newRest)]\n matches.append(matchedText)\n rest = newRest\n raise RuntimeError('Hunh? Failed to find parsing error in %s' % s)", "def check(self, num, line):\n\t\tif self.re.match(line):\n\t\t\treturn self.error", "def _extract_error(self, response):\r\n try:\r\n et = ElementTree.parse(response)\r\n error = et.findtext('body/pre')\r\n return error\r\n except ExpatError,e:\r\n return \"%s: %s (%d/%s)\" % (e,response.read(),response.status,response.reason)", "def handle_errors(resp: requests.Response):\n error_text = resp.text\n if isinstance(resp.text, bytes):\n try:\n error_text = error_text.decode(UTF_ENCODING)\n except UnicodeDecodeError:\n error_text = error_text.decode(\"iso-8859-1\")\n if error_text != \"\":\n _raise_error(error_text)\n resp.raise_for_status()", "def _assert_validation_error( # type: ignore[override]\n self, expected_error_substring: str\n ) -> None:\n with self.assertRaisesRegex(\n utils.ValidationError, expected_error_substring):\n self.collection.validate()", "def test_fetch_nonexist_pdbfmt(self):\n pdbid = '6SL9'\n with self.assertRaisesRegex(ValueError,\n 'The PDB ID given is only represented in mmCIF format'):\n fetch(pdbid)", "def handle_request_parsing_error(err, req, schema):\n abort(422, errors=err.messages)", "def get_regex_mismatch_error_text(field_name, source_regex):\n\n\treturn(\"Value entered for '{0}' does not match regex '{1}'\"\n\t\t .format(field_name, source_regex.pattern))", "def address_regex(self) -> Any:", "def validate_regex(cls, value: str, field: ModelField) -> str:\n if isinstance(cls.regex, str):\n if not re.compile(cls.regex).match(value):\n raise InvalidPatternValue(field_name=field.name, pattern=cls.regex)\n return value", "def process_exception(self, request, exc):\n return None", "def send_incorrect_json_bad_request():\n return make_response(jsonify({\"validation_error\": {\n \"error\": 'Syntax error',\n \"description\": 'Parsing of input JSON is unavailable'\n }}), 400)", "def regexp_error_msg(self, regexp_error_msg):\n\n self._regexp_error_msg = regexp_error_msg" ]
[ "0.5660504", "0.56014264", "0.55581367", "0.55553734", "0.54990566", "0.54236627", "0.54236627", "0.54236627", "0.5416616", "0.54129237", "0.5384286", "0.53773355", "0.5325523", "0.53215456", "0.5320322", "0.5318004", "0.5279758", "0.52679384", "0.52658635", "0.52554643", "0.52365255", "0.5224403", "0.5203271", "0.5196545", "0.5189906", "0.5174813", "0.51697123", "0.5165263", "0.5147329", "0.5141573" ]
0.6139659
0
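A minimal sketch of the regex-validation behaviour described in the record above: compile the configured pattern and convert a re.error into a 422-style exception. The UnprocessableEntity class below is a hypothetical stand-in for whatever exception type the framework actually provides.

import re

class UnprocessableEntity(Exception):
    """Stand-in for an HTTP 422 error type."""

def compile_allowlist_pattern(pattern: str) -> re.Pattern:
    try:
        return re.compile(pattern)
    except re.error as exc:
        # Surface the bad configuration as a client-facing 422 instead of a raw re.error.
        raise UnprocessableEntity(f"invalid regex {pattern!r}: {exc}") from exc

# A malformed pattern such as r"inalid (\d]" raises UnprocessableEntity here.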
Method to construct a list of quadrats using a given bounding box. box: the bounding box of the meshes to measure; size: the size of the square quadrilateral (in the units of the provided mesh); returns: a list of the quadrats that fit the bounding box
def build(self, box, size): centroid = box.centroid edge1_midpoint = get_midpoint_of_edge( box.vertices[0], box.vertices[1]) distance_to_edge1 = centroid.distance_to_xyz(edge1_midpoint) quadrat_indexes1 = range(int(distance_to_edge1 / size) + 1) edge2_midpoint = get_midpoint_of_edge( box.vertices[0], box.vertices[3]) distance_to_edge2 = centroid.distance_to_xyz(edge2_midpoint) quadrat_indexes2 = range(int(distance_to_edge2 / size) + 1) quadrats = list() for i in quadrat_indexes2: for j in quadrat_indexes1: if i == 0 and j == 0: quadrats.append(Quadrat((0, 0), size, centroid)) else: top_right = Vertex(centroid.x + i * size, centroid.y + (j * size), centroid.z) top_left = Vertex(centroid.x + i * size, centroid.y - (j * size), centroid.z) bottom_right = Vertex( centroid.x - i * size, centroid.y + (j * size), centroid.z) bottom_left = Vertex( centroid.x - i * size, centroid.y - (j * size), centroid.z) four_quadrats = [Quadrat((i, j), size, top_right), Quadrat((i, -1 * j), size, top_left), Quadrat((-1 * i, j), size, bottom_right), Quadrat((-1 * i, -1 * j), size, bottom_left)] quadrats_inside = list( filter(lambda x: box.contains(x.midpoint), four_quadrats)) quadrats += quadrats_inside # print("There are this many quadrats: " + str(len(quadrats))) return tuple(set(quadrats))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bound(box_list):\n box_xyxy_list = []\n for box in box_list:\n box_xyxy = xywh2xyxy(box)\n box_xyxy_list.append(box_xyxy)\n\n box_xyxy_list = np.array(box_xyxy_list)\n x1max, y1max, x2max, y2max = np.amax(box_xyxy_list, axis=0)\n x1min, y1min, x2min, y2min = np.amin(box_xyxy_list, axis=0)\n\n boundbox = xyxy2xywh([x1min, y1min, x2max, y2max])\n return boundbox", "def _polygons_to_bboxes(polygons):\n# Build bounding boxes\n bboxes = np.empty([len(polygons), 4])\n for n, p in enumerate(polygons):\n try:\n left, bottom = np.min(p, axis = 0)\n except:\n import pdb\n pdb.set_trace()\n right, top = np.max(p, axis = 0)\n bboxes[n] = [left, bottom, right, top]\n return bboxes", "def Box(bounds=(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0), level=0, quads=True):\n if np.array(bounds).size != 6:\n raise TypeError(\n 'Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)'\n )\n src = _vtk.vtkTessellatedBoxSource()\n src.SetLevel(level)\n src.SetQuads(quads)\n src.SetBounds(bounds)\n src.Update()\n return wrap(src.GetOutput())", "def calculate_box(vertices: [[float]]) -> [float]:\n x_coords = [x[0] for x in vertices]\n y_coords = [x[1] for x in vertices]\n z_coords = [x[2] for x in vertices]\n\n return [min(x_coords), min(y_coords), min(z_coords), max(x_coords), max(y_coords), max(z_coords)]", "def quadkeys_to_bounds(quadkeys: List[str]):\n tile_bounds = [\n mercantile.bounds(mercantile.quadkey_to_tile(qk)) for qk in quadkeys\n ]\n\n minx = 180\n miny = 90\n maxx = -180\n maxy = -90\n for tb in tile_bounds:\n minx = min(minx, tb[0])\n miny = min(miny, tb[1])\n maxx = max(maxx, tb[2])\n maxy = max(maxy, tb[3])\n\n return [minx, miny, maxx, maxy]", "def bounding_box(self):\n box_min = []\n box_max = []\n if self.is_empty():\n raise ValueError('empty polytope is not allowed')\n for i in range(0, self.space_dimension()):\n x = Variable(i)\n coords = [ v.coefficient(x) for v in self.generators() ]\n max_coord = max(coords)\n min_coord = min(coords)\n box_max.append(max_coord)\n box_min.append(min_coord)\n return (tuple(box_min), tuple(box_max))", "def shape_from_bounding_box(bounding_box):\n size = []\n for axs in bounding_box:\n delta = axs[1] - axs[0]\n size.append(int(delta + 0.5))\n return tuple(reversed(size))", "def __bbox2square(self, bboxes):\n height = bboxes[:, 2] - bboxes[:, 0] + 1\n width = bboxes[:, 3] - bboxes[:, 1] + 1\n side = np.maximum(width, height).T\n bboxes[:, 0] += (height - side) * 0.5\n bboxes[:, 1] += (width - side) * 0.5\n bboxes[:, 2] = np.around(bboxes[:, 0] + side - 1);\n bboxes[:, 3] = np.around(bboxes[:, 1] + side - 1);\n bboxes[:, :2] = np.around(bboxes[:, :2])\n return bboxes", "def rasterize_polygons_within_box(\n polygons: List[np.ndarray], box: np.ndarray, mask_size: int\n) -> torch.Tensor:\n # 1. Shift the polygons w.r.t the boxes\n w, h = box[2] - box[0], box[3] - box[1]\n\n polygons = copy.deepcopy(polygons)\n for p in polygons:\n p[0::2] = p[0::2] - box[0]\n p[1::2] = p[1::2] - box[1]\n\n # 2. Rescale the polygons to the new box size\n ratio_h = mask_size / max(h, 0.1)\n ratio_w = mask_size / max(w, 0.1)\n\n if ratio_h == ratio_w:\n for p in polygons:\n p *= ratio_h\n else:\n for p in polygons:\n p[0::2] *= ratio_w\n p[1::2] *= ratio_h\n\n # 3. 
Rasterize the polygons with coco api\n mask = polygons_to_bitmask(polygons, mask_size, mask_size)\n mask = torch.from_numpy(mask)\n return mask", "def yolo_boxes_to_corners(box_xy, box_wh):\n box_mins = box_xy - (box_wh / 2.)\n box_maxes = box_xy + (box_wh / 2.)\n\n return K.concatenate([\n box_mins[..., 1:2], # y_min\n box_mins[..., 0:1], # x_min\n box_maxes[..., 1:2], # y_max\n box_maxes[..., 0:1] # x_max\n ])", "def build_box(blcorner, trcorner):\n modelbox = [[], []]\n if blcorner != None:\n x, y = literal_eval(blcorner)\n modelbox[0].append(str(x))\n modelbox[0].append(str(y))\n else:\n raise TelemacException(\\\n '... could not find your bounding box bottom left'\n 'corner. Please use --bl option (, delimited, no '\n 'spaces).\\n\\n')\n if trcorner != None:\n x, y = literal_eval(trcorner)\n modelbox[1].append(str(x))\n modelbox[1].append(str(y))\n else:\n raise TelemacException(\\\n '... could not find your bounding box top right '\n 'corner. Please use --tr option (, delimited, no '\n 'spaces).\\n\\n')\n return modelbox", "def bounding_box(points):\n x, y, z = zip(*points)\n min_x = min(x)\n max_x = max(x)\n min_y = min(y)\n max_y = max(y)\n min_z = min(z)\n max_z = max(z)\n return [\n [min_x, min_y, min_z],\n [max_x, min_y, min_z],\n [max_x, max_y, min_z],\n [min_x, max_y, min_z],\n [min_x, min_y, max_z],\n [max_x, min_y, max_z],\n [max_x, max_y, max_z],\n [min_x, max_y, max_z],\n ]", "def createMesh(width, height):\r\n mesh = [(x,y) for x in range(0, width+1) for y in range(0,height+1)]\r\n return mesh", "def yolo2_boxes_to_corners(box_xy, box_wh):\n box_mins = box_xy - (box_wh / 2.)\n box_maxes = box_xy + (box_wh / 2.)\n\n return K.concatenate([\n box_mins[..., 1:2], # y_min\n box_mins[..., 0:1], # x_min\n box_maxes[..., 1:2], # y_max\n box_maxes[..., 0:1] # x_max\n ])", "def get_grid_mesh_coordinates(bbox, spacings=(1,1,1), dot_spacing=1, include_borderline=True):\n\n xmin,xmax,ymin,ymax,zmin,zmax = bbox\n\n xdim, ydim, zdim = (xmax+1-xmin, ymax+1-ymin, zmax+1-zmin)\n\n xs = np.arange(0, xdim, spacings[0])\n ys = np.arange(0, ydim, spacings[1])\n zs = np.arange(0, zdim, spacings[2])\n\n vol = np.zeros((ydim, xdim, zdim), np.bool)\n xs = xs.astype(np.int)\n ys = ys.astype(np.int)\n zs = zs.astype(np.int)\n xs = xs[(xs >= 0) & (xs < xdim)]\n ys = ys[(ys >= 0) & (ys < ydim)]\n zs = zs[(zs >= 0) & (zs < zdim)]\n if include_borderline:\n if 0 not in xs:\n xs = np.r_[0, xs, xdim-1]\n else:\n xs = np.r_[xs, xdim-1]\n if 0 not in ys:\n ys = np.r_[0, ys, ydim-1]\n else:\n ys = np.r_[ys, ydim-1]\n if 0 not in zs:\n zs = np.r_[0, zs, zdim-1]\n else:\n zs = np.r_[zs, zdim-1]\n for y in ys:\n vol[y, xs, ::dot_spacing] = 1\n vol[y, ::dot_spacing, zs] = 1\n for x in xs:\n vol[ys, x, ::dot_spacing] = 1\n vol[::dot_spacing, x, zs] = 1\n for z in zs:\n vol[ys, ::dot_spacing, z] = 1\n vol[::dot_spacing, xs, z] = 1\n\n ys, xs, zs = np.nonzero(vol)\n\n return np.c_[xs, ys, zs] + (xmin,ymin,zmin)", "def rect(rows: int, cols: int, top: int = 0,\n left: int = 0) -> List['GridQubit']:\n return [\n GridQubit(row, col)\n for row in range(top, top + rows)\n for col in range(left, left + cols)\n ]", "def box(L):\n newL = []\n STRIDE = 4 # since we're using RGBA!\n for i in range(len(L)/STRIDE):\n newL.append( L[STRIDE*i:STRIDE*i+3] ) # since we're providing RGB\n return newL", "def get_bounding_boxes(dets):\n bounding_boxes = []\n for box in dets:\n bounding_box = {'top_left_x': box.left(),\n 'top_left_y': box.top(),\n 'bottom_right_x': box.right(),\n 'bottom_right_y': box.bottom()}\n 
bounding_boxes.append(bounding_box)\n return bounding_boxes", "def _convert_to_square(self, bboxes):\n\n square_bboxes = torch.zeros_like(bboxes, device=self.device, dtype=torch.float32)\n x1, y1, x2, y2 = [bboxes[:, i].float() for i in range(4)]\n h = y2 - y1 + 1.0\n w = x2 - x1 + 1.0\n max_side = torch.max(h, w)\n square_bboxes[:, 0] = x1 + w*0.5 - max_side*0.5\n square_bboxes[:, 1] = y1 + h*0.5 - max_side*0.5\n square_bboxes[:, 2] = square_bboxes[:, 0] + max_side - 1.0\n square_bboxes[:, 3] = square_bboxes[:, 1] + max_side - 1.0\n\n square_bboxes = torch.ceil(square_bboxes + 1).int()\n return square_bboxes", "def yolo_boxes_to_corners(box_xy, box_wh):\r\n box_mins = box_xy - (box_wh / 2.)\r\n box_maxes = box_xy + (box_wh / 2.)\r\n\r\n return tf.concat([\r\n box_mins[..., 1:2], # y_min\r\n box_mins[..., 0:1], # x_min\r\n box_maxes[..., 1:2], # y_max\r\n box_maxes[..., 0:1] # x_max\r\n ], axis=-1)", "def convert(size, box):\n # TODO rewrite box to be [TL, BR] coordinates\n #pdb.set_trace()\n dw = 1./size[0]\n dh = 1./size[1]\n x = (box[0] + box[1])/2.0\n y = (box[2] + box[3])/2.0\n w = box[1] - box[0]\n h = box[3] - box[2]\n x = x*dw\n w = w*dw\n y = y*dh\n h = h*dh\n return (x,y,w,h)", "def add_whitespace(bounding_box: list, border: int = 5) -> list:\n assert len(bounding_box) == 4, \"Bounding box can only have 4 corners\"\n\n larger_box = []\n for i, corner in enumerate(bounding_box):\n if i < 2:\n larger_box.append(corner - border)\n else:\n larger_box.append(corner + border)\n\n return larger_box", "def bbox(bbox = [(-1, -1), (3, 4)], layer = 0):\n D = Device(name = 'bbox')\n (a,b), (c,d) = bbox\n points = ((a,b), (c,b), (c,d), (a,d))\n D.add_polygon(points, layer = layer)\n return D", "def bounding_box(vertices, (height, width), extend=5):\n x_min = min(x for x, y in vertices) - extend\n x_max = max(x for x, y in vertices) + extend\n y_min = min(y for x, y in vertices) - extend\n y_max = max(y for x, y in vertices) + extend\n \n return max(x_min, 0), min(x_max, width), max(y_min, 0), min(y_max, height)", "def _initialise_aabb(self):\n # calculate the bounding box for all tetraherdon in the mesh\n # find the min/max extents for xyz\n tetra_bb = np.zeros((self.elements.shape[0], 19, 3))\n minx = np.min(self.nodes[self.elements[:, :4], 0], axis=1)\n maxx = np.max(self.nodes[self.elements[:, :4], 0], axis=1)\n miny = np.min(self.nodes[self.elements[:, :4], 1], axis=1)\n maxy = np.max(self.nodes[self.elements[:, :4], 1], axis=1)\n minz = np.min(self.nodes[self.elements[:, :4], 2], axis=1)\n maxz = np.max(self.nodes[self.elements[:, :4], 2], axis=1)\n cell_indexes = self.aabb_grid.global_index_to_cell_index(\n np.arange(self.aabb_grid.n_elements)\n )\n corners = self.aabb_grid.cell_corner_indexes(cell_indexes)\n positions = self.aabb_grid.node_indexes_to_position(corners)\n ## Because we known the node orders just select min/max from each\n # coordinate. 
Use these to check whether the tetra is in the cell\n x_boundary = positions[:, [0, 1], 0]\n y_boundary = positions[:, [0, 2], 1]\n z_boundary = positions[:, [0, 6], 2]\n a = np.logical_and(\n minx[None, :] > x_boundary[:, None, 0],\n minx[None, :] < x_boundary[:, None, 1],\n ) # min point between cell\n b = np.logical_and(\n maxx[None, :] < x_boundary[:, None, 1],\n maxx[None, :] > x_boundary[:, None, 0],\n ) # max point between cell\n c = np.logical_and(\n minx[None, :] < x_boundary[:, None, 0],\n maxx[None, :] > x_boundary[:, None, 0],\n ) # min point < than cell & max point > cell\n\n x_logic = np.logical_or(np.logical_or(a, b), c)\n\n a = np.logical_and(\n miny[None, :] > y_boundary[:, None, 0],\n miny[None, :] < y_boundary[:, None, 1],\n ) # min point between cell\n b = np.logical_and(\n maxy[None, :] < y_boundary[:, None, 1],\n maxy[None, :] > y_boundary[:, None, 0],\n ) # max point between cell\n c = np.logical_and(\n miny[None, :] < y_boundary[:, None, 0],\n maxy[None, :] > y_boundary[:, None, 0],\n ) # min point < than cell & max point > cell\n\n y_logic = np.logical_or(np.logical_or(a, b), c)\n\n a = np.logical_and(\n minz[None, :] > z_boundary[:, None, 0],\n minz[None, :] < z_boundary[:, None, 1],\n ) # min point between cell\n b = np.logical_and(\n maxz[None, :] < z_boundary[:, None, 1],\n maxz[None, :] > z_boundary[:, None, 0],\n ) # max point between cell\n c = np.logical_and(\n minz[None, :] < z_boundary[:, None, 0],\n maxz[None, :] > z_boundary[:, None, 0],\n ) # min point < than cell & max point > cell\n\n z_logic = np.logical_or(np.logical_or(a, b), c)\n logic = np.logical_and(x_logic, y_logic)\n logic = np.logical_and(logic, z_logic)\n\n self.aabb_table = csr_matrix(logic)", "def _makequads_all(self):\n nholes = self.ctrs.shape[0]\n qlist = []\n for i in range(nholes):\n for j in range(nholes):\n for k in range(nholes):\n for q in range(nholes):\n if i < j and j < k and k < q:\n qlist.append((i, j, k, q))\n qarray = np.array(qlist).astype(np.int)\n if self.verbose:\n print(\"qarray\", qarray.shape, \"\\n\", qarray)\n qname = []\n uvwlist = []\n # foreach row of 3 elts...\n for quad in qarray:\n qname.append(\"{0:d}_{1:d}_{2:d}_{3:d}\".format(\n quad[0], quad[1], quad[2], quad[3]))\n if self.verbose:\n print('quad:', quad, qname[-1])\n uvwlist.append((self.ctrs[quad[0]] - self.ctrs[quad[1]],\n self.ctrs[quad[1]] - self.ctrs[quad[2]],\n self.ctrs[quad[2]] - self.ctrs[quad[3]]))\n if self.verbose:\n print(qarray.shape, np.array(uvwlist).shape)\n return qarray, np.array(uvwlist)", "def convert_bbox_to_z(bbox):\n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n x = bbox[0] + w/2.\n y = bbox[1] + h/2.\n s = w * h #scale is just area\n r = w / float(h)\n return np.array([x, y, s, r]).reshape((4, 1))", "def grid(self, northeast, southwest, density=200):\n grid = []\n\n # Determine the bounds of a standard, positive quadrant plot\n y_max, y_min = int(northeast[0]), int(southwest[0])\n x_max, x_min = int(northeast[1]), int(southwest[1])\n\n # Construct a sequence of boxes each moving clockwise from southwest corner\n master = []\n for x in range(x_min, x_max, density):\n for y in range(y_min, y_max, density):\n polygon = [\n (x, y),\n (x, y + density),\n (x + density, y + density),\n (x + density, y),\n (x, y)\n ]\n master.append(polygon)\n\n return master", "def square_grid(\n bbox: List[float], n_cells: Union[int, float], options: Dict = {},\n) -> FeatureCollection:\n\n return rectangle_grid(bbox, n_cells, n_cells, options)", "def get_boxes(self):\r\n\r\n boxes = [(\" \", 
self.worldbox.tl, self.worldbox.br)]\r\n# boxes = []\r\n boxes += [(\".\", b.tl, b.br) for b in self.wallboxes]\r\n boxes += [(\"x\", b.tl, b.br) for b in self.targetboxes]\r\n agentscale = 100\r\n boxes += [(\"a\", (self.i_state[0] - self.dx * agentscale, self.i_state[1] - self.dx * agentscale),\r\n (self.i_state[0] + self.dx * agentscale, self.i_state[1] + self.dx * agentscale))]\r\n return boxes" ]
[ "0.5859859", "0.57153344", "0.5666255", "0.5610459", "0.55955267", "0.558588", "0.5577103", "0.5567914", "0.54994035", "0.5485716", "0.54494894", "0.54238886", "0.54005504", "0.53592753", "0.534929", "0.53394306", "0.5338785", "0.5329687", "0.5327499", "0.5321613", "0.5279145", "0.5278066", "0.52693844", "0.5265118", "0.5258306", "0.52449423", "0.5242103", "0.52328306", "0.52243686", "0.52196395" ]
0.6905943
0
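A simplified 2-D sketch of the quadrat-tiling idea from the record above: tile the XY extent of a bounding box with squares of a given size and keep the centres that fall inside. The Vertex/Quadrat classes and Z handling of the original are omitted; all names here are assumptions.

def quadrat_centres(xmin, ymin, xmax, ymax, size):
    centres = []
    nx = int((xmax - xmin) // size) + 1
    ny = int((ymax - ymin) // size) + 1
    for i in range(nx):
        for j in range(ny):
            cx = xmin + (i + 0.5) * size
            cy = ymin + (j + 0.5) * size
            # Keep only centres that still lie within the box.
            if cx <= xmax and cy <= ymax:
                centres.append((cx, cy))
    return centres

# Example: a 10 x 6 box tiled with 2-unit quadrats gives a 5 x 3 grid of centres.
assert len(quadrat_centres(0, 0, 10, 6, 2)) == 15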
Build offset regression head for each joint
def _make_separete_regression_head(self, layer_config) -> Tuple[nn.ModuleList, nn.ModuleList]: offset_feature_layers = [] offset_final_layer = [] for _ in range(self.num_joints): feature_conv = self._make_layer( blocks_dict[layer_config["BLOCK"]], layer_config["NUM_CHANNELS_PERKPT"], layer_config["NUM_CHANNELS_PERKPT"], layer_config["NUM_BLOCKS"], dilation=layer_config["DILATION_RATE"], ) offset_feature_layers.append(feature_conv) offset_conv = nn.Conv2d( in_channels=layer_config["NUM_CHANNELS_PERKPT"], out_channels=2, kernel_size=self.spec.FINAL_CONV_KERNEL, stride=1, padding=1 if self.spec.FINAL_CONV_KERNEL == 3 else 0, ) offset_final_layer.append(offset_conv) return nn.ModuleList(offset_feature_layers), nn.ModuleList(offset_final_layer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_fun(params, slope, x):\n w = params['w']\n t0 = params['t0']\n offset = params['offset']\n return offset + slope * jax.nn.sigmoid(jnp.dot(x, w) - t0)", "def get_offset_model(species_diff_table):\n m_init = modeling.polynomial.Polynomial2D(2)\n fit = modeling.fitting.LevMarLSQFitter()\n xx, yy = species_diff_table['vu'], species_diff_table['Ju']\n zz = species_diff_table['Splat-Barton']\n model_fit = fit(m_init, xx, yy, zz)\n return model_fit", "def linearize(self, params, unknowns, resids):\n\n m = self.slope\n J = {}\n\n J['y', 'x'] = m\n return J", "def _regression_head(label_name=None,\n weight_column_name=None,\n label_dimension=1,\n enable_centered_bias=False,\n head_name=None):\n return _RegressionHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n label_dimension=label_dimension,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name)", "def __init__(self):\n self.slope = -1.0\n self.last_obs = -1.0\n self.last_obs_ind = -1\n self._fitted = False", "def nnRegression(data):", "def calculate_module_offsets(self):\n \n # These aren't for instantiating, but we use them to get the dimensions\n self.poly_contact_offset = vector(0.5*contact.poly.width,0.5*contact.poly.height)\n\n # M1/M2 routing pitch is based on contacted pitch\n self.m1_pitch = max(contact.m1m2.width,contact.m1m2.height) + max(self.m1_space,self.m2_space)\n self.m2_pitch = max(contact.m2m3.width,contact.m2m3.height) + max(self.m2_space,self.m3_space)\n \n # This corrects the offset pitch difference between M2 and M1\n self.offset_fix = vector(0.5*(self.m2_width-self.m1_width),0)\n\n # delay chain will be rotated 90, so move it over a width\n # we move it up a inv height just for some routing room\n self.rbl_inv_offset = vector(self.delay_chain.height, self.inv.width)\n # access TX goes right on top of inverter, leave space for an inverter which is\n # about the same as a TX. 
We'll need to add rails though.\n self.access_tx_offset = vector(1.25*self.inv.height,self.rbl_inv_offset.y) + vector(0,2.5*self.inv.width)\n self.delay_chain_offset = self.rbl_inv_offset + vector(0,4*self.inv.width)\n\n # Replica bitline and such are not rotated, but they must be placed far enough\n # away from the delay chain/inverter with space for three M2 tracks\n self.bitcell_offset = self.rbl_inv_offset + vector(2*self.m2_pitch, 0) + vector(0, self.bitcell.height + self.inv.width)\n\n self.rbl_offset = self.bitcell_offset\n\n \n self.height = self.rbl_offset.y + self.rbl.height + self.m2_pitch\n self.width = self.rbl_offset.x + self.bitcell.width", "def _construct_train_joint(self):\n # setup some symbolic variables for theano to deal with\n xi = T.matrix()\n xo = T.matrix()\n br = T.lscalar()\n # collect the outputs to return from this function\n outputs = [self.joint_cost, self.nll_cost, self.kld_cost, \\\n self.reg_cost, self.obs_costs]\n # compile the theano function\n _, hi_zmuv = self._construct_zmuv_samples(xi, br)\n func = theano.function(inputs=[ xi, xo, br ], \\\n outputs=outputs, \\\n givens={ self.x_in: xi.repeat(br, axis=0), \\\n self.x_out: xo.repeat(br, axis=0), \\\n self.hi_zmuv: hi_zmuv }, \\\n updates=self.joint_updates)\n return func", "def offset(requestContext, seriesList, factor):\n for series in seriesList:\n series.name = \"offset(%s,%g)\" % (series.name,float(factor))\n series.pathExpression = series.name\n for i,value in enumerate(series):\n if value is not None:\n series[i] = value + factor\n return seriesList", "def o_wo_per_head(self):\n assert self.ff % self.heads == 0\n # fuse ff->e and projection layer of self-attention\n return (self.ff // (self.heads-self.padded_heads)) + self.qkv", "def unroll(self, trajectory):\n # We omit the details of network inference here.\n return policy_logits, baselines", "def _some_variables(use_posInd=False):\n\n parent = (\n np.array(\n [\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 1,\n 7,\n 8,\n 9,\n 10,\n 1,\n 12,\n 13,\n 14,\n 15,\n 13,\n 17,\n 18,\n 19,\n 20,\n 21,\n 20,\n 23,\n 13,\n 25,\n 26,\n 27,\n 28,\n 29,\n 28,\n 31,\n ]\n )\n - 1\n )\n\n offset = np.array(\n [\n 0.000000,\n 0.000000,\n 0.000000,\n -132.948591,\n 0.000000,\n 0.000000,\n 0.000000,\n -442.894612,\n 0.000000,\n 0.000000,\n -454.206447,\n 0.000000,\n 0.000000,\n 0.000000,\n 162.767078,\n 0.000000,\n 0.000000,\n 74.999437,\n 132.948826,\n 0.000000,\n 0.000000,\n 0.000000,\n -442.894413,\n 0.000000,\n 0.000000,\n -454.206590,\n 0.000000,\n 0.000000,\n 0.000000,\n 162.767426,\n 0.000000,\n 0.000000,\n 74.999948,\n 0.000000,\n 0.100000,\n 0.000000,\n 0.000000,\n 233.383263,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 121.134938,\n 0.000000,\n 0.000000,\n 115.002227,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 151.034226,\n 0.000000,\n 0.000000,\n 278.882773,\n 0.000000,\n 0.000000,\n 251.733451,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 99.999627,\n 0.000000,\n 100.000188,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 151.031437,\n 0.000000,\n 0.000000,\n 278.892924,\n 0.000000,\n 0.000000,\n 251.728680,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 99.999888,\n 0.000000,\n 137.499922,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n ]\n )\n\n offset = offset.reshape(-1, 3)\n\n rotInd = [\n [5, 6, 4],\n [8, 9, 7],\n [11, 12, 10],\n [14, 15, 13],\n [17, 18, 16],\n [],\n [20, 21, 19],\n [23, 24, 22],\n [26, 27, 
25],\n [29, 30, 28],\n [],\n [32, 33, 31],\n [35, 36, 34],\n [38, 39, 37],\n [41, 42, 40],\n [],\n [44, 45, 43],\n [47, 48, 46],\n [50, 51, 49],\n [53, 54, 52],\n [56, 57, 55],\n [],\n [59, 60, 58],\n [],\n [62, 63, 61],\n [65, 66, 64],\n [68, 69, 67],\n [71, 72, 70],\n [74, 75, 73],\n [],\n [77, 78, 76],\n [],\n ]\n\n # definitions are originating from matlab file --> bring them to zero based indexing\n rotInd = [[e - 1 for e in s if len(s) > 0] for s in rotInd]\n posInd = [0, 1, 2] if use_posInd else None\n\n expmapInd = np.split(np.arange(4, 100) - 1, 32)\n\n return parent, offset, rotInd, expmapInd, posInd", "def build_head(self, n_features, device=None):\n # By default this is a linear layer\n self.head = self.create_compatible_head(n_features, device)", "def __init__(self, model, line, segments = None, influence = None, \r\n strength = 1, variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into #segments pieces\r\n \r\n self.line_raw = copy.copy(line)\r\n \r\n if segments is None:\r\n \r\n self.segments = line.shape[0]-1\r\n \r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n \r\n raise Exception('Number of segments '+str(self.segments)+\" mustn't be smaller than number of line points \"+str(line.shape[0])+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = copy.copy(self.line[:,0] + 1j*self.line[:,1])\r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # --------------------------------------------------------------------- \r\n \r\n \r\n \r\n \r\n self.strength = np.ones(self.segments)*strength\r\n \r\n if influence is None:\r\n self.influence = self.model.domain_radius*2\r\n else:\r\n self.influence = influence\r\n \r\n \r\n self.Zi = []\r\n self.offset_outside = []\r\n self.L = []\r\n self.zc = []\r\n self.segment_nvec = []\r\n self.head_target = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n influence_pt = (self.line_c[seg+1]-self.line_c[seg])*self.influence/self.L[seg] + self.line_c[seg]\r\n Z = (2*influence_pt-(self.line_c[seg]+self.line_c[seg+1]))/(self.line_c[seg+1]-self.line_c[seg])\r\n self.Zi += [copy.copy(Z)]\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n part1 = np.nan_to_num((Z+1)*np.log(Z+1))\r\n part2 = np.nan_to_num((Z-1)*np.log(Z-1))\r\n self.offset_outside += [self.L[seg] / (4*np.pi) * (part1 - part2)]\r\n \r\n # Convert list of segment centers to array\r\n self.zc = np.asarray(self.zc)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of 
unknown variables. Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def concatenate_offset(self, X):\n return np.c_[np.ones((X.shape[0], 1)), X]", "def predict(self, xs, **kwargs):", "def offset(x, y, L):\n length = x.size\n offsetx = np.zeros((length, 2))\n offsety = np.zeros((length, 2))\n dx = np.zeros(length-1)\n dy = np.zeros(length-1)\n dxL = np.zeros(length-1)\n dyL = np.zeros(length-1)\n xl = np.zeros(length) # counterclockwise\n xr = np.zeros(length) # clockwise\n yl = np.zeros(length)\n yr = np.zeros(length)\n xl0 = np.zeros(length)\n xr0 = np.zeros(length)\n yl0 = np.zeros(length)\n yr0 = np.zeros(length) \n for i in range(0, length-1):\n dx[i] = x[i+1]-x[i]\n dy[i] = y[i+1]-y[i]\n for i in range(0, length-1):\n r = np.sqrt(dx[i]**2 + dy[i]**2)\n dxL[i] = dx[i]*L/r\n dyL[i] = dy[i]*L/r\n xl0[i] = -dyL[i] + x[i]\n yl0[i] = dxL[i] + y[i]\n xr0[i] = dyL[i] + x[i]\n yr0[i] = -dxL[i] + y[i]\n xl0[length-1] = xl0[length-2] + dx[length-2]\n yl0[length-1] = yl0[length-2] + dy[length-2]\n xr0[length-1] = xr0[length-2] + dx[length-2]\n yr0[length-1] = yr0[length-2] + dy[length-2]\n xl[0] = xl0[0]\n yl[0] = yl0[0]\n xl[length-1] = xl0[length-1]\n yl[length-1] = yl0[length-1]\n xr[0] = xr0[0]\n yr[0] = yr0[0]\n xr[length-1] = xr0[length-1]\n yr[length-1] = yr0[length-1]\n for i in range(1, length-1):\n a = np.array([[dy[i-1], -dx[i-1]], [dy[i], -dx[i]]])\n bl = np.array([dy[i-1]*xl0[i-1]-dx[i-1]*yl0[i-1], dy[i]*xl0[i]-dx[i]*yl0[i]])\n br = np.array([dy[i-1]*xr0[i-1]-dx[i-1]*yr0[i-1], dy[i]*xr0[i]-dx[i]*yr0[i]])\n theta = (dx[i-1]*dx[i]+dy[i-1]*dy[i])/(dx[i-1]**2+dy[i-1]**2)**0.5/(dx[i]**2+dy[i]**2)**0.5\n if theta > 1 - 1e-10:\n xl[i] = xl0[i]\n yl[i] = yl0[i]\n xr[i] = xr0[i]\n yr[i] = yr0[i]\n else:\n pl = np.linalg.solve(a, bl)\n xl[i] = pl[0]\n yl[i] = pl[1]\n pr = np.linalg.solve(a, br)\n xr[i] = pr[0]\n yr[i] = pr[1]\n offsetx[:, 0], offsetx[:, 1] = xl, xr\n offsety[:, 0], offsety[:, 1] = yl, yr\n return offsetx, offsety", "def __init__(self, slope):\n self.slope = slope", "def __init__(self, model, line, segments = None,head_target = 0,\r\n variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n # Append this element to the specified model\r\n self.model = model\r\n model.elementlist.append(self)\r\n model.linear_solver = True\r\n\r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into segments pieces\r\n \r\n # Complexify the line, if it wasn't already complex\r\n line = self.complexify(line)\r\n \r\n # The subdivision algorith requires the line coordinates as a real N-by-2 matrix\r\n line = np.column_stack((\r\n np.real(line)[:,np.newaxis],\r\n np.imag(line)[:,np.newaxis]))\r\n \r\n self.line_raw = copy.copy(line)\r\n if segments is None:\r\n self.segments = line.shape[0]-1\r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n raise Exception('Prescribed number of line segments 
'+str(self.segments)+\" mustn't be smaller than base number of segments \"+str(line.shape[0]-1)+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]-1:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # ---------------------------------------------------------------------\r\n \r\n # Get strength parameters for each vertex\r\n self.strength = np.ones(self.segments)\r\n \r\n \r\n self.zc = []\r\n self.segment_nvec = []\r\n self.L = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n self.zc = np.asarray(self.zc)\r\n \r\n # Extract target variables\r\n self.variables = variables\r\n self.priors = priors\r\n \r\n self.L = np.asarray(self.L)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def __init__(self,\n label_name,\n weight_column_name,\n label_dimension,\n enable_centered_bias,\n head_name,\n loss_fn=_mean_squared_loss):\n super(_RegressionHead, self).__init__(head_name=head_name)\n\n self._loss_fn = loss_fn\n self._logits_dimension = label_dimension\n self._label_name = label_name\n self._weight_column_name = weight_column_name\n self._enable_centered_bias = enable_centered_bias\n self._problem_type = constants.ProblemType.LINEAR_REGRESSION", "def encodeOffset(matched, priors, variances):\r\n\r\n # dist b/t match center and prior's center\r\n g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]\r\n # encode variance\r\n g_cxcy /= (variances[0] * priors[:, 2:])\r\n # match wh / prior wh\r\n g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]\r\n g_wh = torch.log(g_wh) / variances[1]\r\n # return target for smooth_l1_loss\r\n return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]\r", "def get_preds_lin_reg(df, target_col, N, pred_min, offset):\n # Create linear regression object\n regr = LinearRegression(fit_intercept=True)\n\n pred_list = []\n\n for i in range(offset, len(df['daily'])):\n X_train = np.array(range(len(df['daily'][i-N:i]))) # e.g. [0 1 2 3 4]\n y_train = np.array(df['daily'][i-N:i]) # e.g. 
[2944 3088 3226 3335 3436]\n X_train = X_train.reshape(-1, 1) # e.g X_train = \n # [[0]\n # [1]\n # [2]\n # [3]\n # [4]]\n # X_train = np.c_[np.ones(N), X_train] # add a column\n y_train = y_train.reshape(-1, 1)\n # print X_train.shape\n # print y_train.shape\n # print 'X_train = \\n' + str(X_train)\n # print 'y_train = \\n' + str(y_train)\n regr.fit(X_train, y_train) # Train the model\n pred = regr.predict(np.array(N).reshape(1,-1))\n \n pred_list.append(pred[0][0]) # Predict the footfall using the model\n \n # If the values are < pred_min, set it to be pred_min\n pred_list = np.array(pred_list)\n pred_list[pred_list < pred_min] = pred_min\n \n return pred_list", "def learn(self):\r\n \r\n # unpack\r\n X = self.Train.X\r\n Y = self.Train.Y\r\n DY = self.Train.DY\r\n \r\n NX ,ND = X.shape\r\n NDY,_ = DY.shape\r\n \r\n print 'Build Information Matricies ...'\r\n \r\n # functions\r\n ay0 = np.array([[1.]]*NX)\r\n ay1 = X\r\n ay2 = np.reshape( np.einsum('ij,ik->ijk',X,X) , [-1,ND*ND] )\r\n\r\n # reduce redundant basis variables\r\n i_doub = np.tri(ND,k=-1).T == 1\r\n ay2[:,i_doub.ravel()] = ay2[:,i_doub.ravel()] * 2. \r\n i_keep = np.tri(ND,k=0).T == 1\r\n ay2 = ay2[:,i_keep.ravel()]\r\n\r\n # basis matrix, functions\r\n Ay = np.hstack([ay0,ay1,ay2])\r\n \r\n # arrays for the least squares regression\r\n At = Ay\r\n Yt = Y\r\n \r\n # gradients\r\n if NDY:\r\n ad0 = np.array([[0.]]*NX*ND)\r\n \r\n ad1 = np.tile( np.eye(ND) , [NX,1] )\r\n \r\n ad2a = np.repeat( np.eye(ND)[:,None,:] , ND , 1 )\r\n ad2a = np.reshape( ad2a , [-1,ND*ND] ) \r\n ad2a = np.repeat( ad2a, NX, axis=0 ) * np.repeat( np.tile( X, [ND,1] ) , ND, axis=1 )\r\n \r\n ad2b = np.repeat( np.eye(ND)[:,:,None] , ND , 2 )\r\n ad2b = np.reshape( ad2b , [-1,ND*ND] ) \r\n ad2b = np.repeat( ad2b, NX, axis=0 ) * np.tile( np.tile( X, [ND,1] ) , [1,ND] )\r\n \r\n ad2 = ad2a + ad2b\r\n \r\n # reduce redundant bases\r\n ad2[:,i_doub.ravel()] = ad2[:,i_doub.ravel()] * 2.\r\n ad2 = ad2[:,i_keep.ravel()] \r\n \r\n Ad = np.hstack([ad0,ad1,ad2])\r\n \r\n # add to arrays for least squares regression\r\n At = np.vstack([At,Ad])\r\n Yt = np.vstack([Yt, np.ravel(DY.T)[:,None]])\r\n \r\n print 'Least Squares Solve ...'\r\n B = sp.linalg.lstsq(At,Yt)[0] \r\n \r\n # unpack data\r\n c = B[0,0]\r\n b = B[1:ND+1]\r\n \r\n A = np.zeros([ND,ND])\r\n A[i_keep] = B[ND+1:,0]\r\n A[i_keep.T] = A.T[i_keep.T]\r\n \r\n # problem forumulation\r\n A = A*2.\r\n \r\n # store results\r\n self.c = c\r\n self.b = b\r\n self.A = A\r\n \r\n print ''", "def offset_all(x, y, beck_bed, t):\n length = x.size\n xyz1 = np.zeros((length, 3))\n xyz2 = np.zeros((length, 3))\n xyz1[:, 0] = np.copy(x)\n xyz1[:, 1] = np.copy(y)\n xyz1[:, 2] = np.copy(beck_bed[:, NUM])\n allxyz = np.copy(xyz1)\n offsetx = np.zeros((length, 2))\n offsety = np.zeros((length, 2))\n for i in range(NUM-1, -1, -1):\n \"\"\"Offset distance L is looping from INTERVAL to B.\"\"\"\n if np.mod(t, LPRINT) == 0:\n if i == NUM - 1:\n extr = '...(innermost)'\n elif i == 0:\n extr = '...(outermost)'\n else:\n extr = '...'\n print('+> Offsetting Polyline #'\n + str(i+1) + ' & #' + str(2*NUM+1-i) + extr, end='')\n offsetx, offsety = offset(x, y, WIDTH/2-i*INTERVAL)\n if i == 0 and SAVEBOUND and t == 0:\n t1 = np.copy(offsetx)\n t2 = np.copy(offsetx)\n t1[:,0] = np.copy(offsetx[:, 0])\n t1[:,1] = np.copy(offsety[:, 0])\n t2[:,0] = np.copy(offsetx[:, 1])\n t2[:,1] = np.copy(offsety[:, 1])\n t3 = np.concatenate((t1, t2[::-1], np.array([t1[0, :]])), axis=0)\n np.savetxt(FNAME.rsplit('.', 1)[0] + '_boundary.i2s', t3, 
fmt='%.6e')\n xyz1[:, 0] = offsetx[:, 0]\n xyz1[:, 1] = offsety[:, 0]\n xyz1[:, 2] = beck_bed[:, -1-i]\n xyz2[:, 0] = offsetx[:, 1]\n xyz2[:, 1] = offsety[:, 1]\n xyz2[:, 2] = beck_bed[:, i]\n allxyz = np.concatenate((allxyz, xyz1, xyz2), axis=0)\n if np.mod(t, LPRINT) == 0:\n print(' [done]')\n if i == 0 and np.mod(t, LPRINT) == 0:\n print(' * Note: Polyline #' + str(NUM + 1) + ' is centerline')\n return allxyz", "def elbow_with_synthetic_data():\n delta = 0.1\n slope_2 = 2\n slope_3 = 3\n break_pt = 5\n intercept_2 = 0.0\n line_2 = np.arange(0, break_pt, delta) * slope_2 + intercept_2\n line_3 = (\n np.arange(break_pt, break_pt * 2, delta) * slope_3\n + (slope_2 - slope_3) * break_pt\n )\n x = np.arange(0, break_pt * 2, delta)\n y = np.concatenate((line_2, line_3))\n break_pt = break_pt\n\n return x, y, break_pt", "def buildWPriorTerm(self):\r\n\r\n # self.w_prior.shape == (minibatch size,)\r\n self.w_prior = 0.5*T.sum(1 + T.log(self.qwgy_var) - self.qwgy_mu**2-self.qwgy_var, axis=1)\r\n\r\n self.w_prior_modif = - T.maximum(self.hyper['treshold_w_prior'], -self.w_prior)", "def datasetratiocopy_xr_extend(l,ratio,x_offset,y_offset):#只延伸上下两边以及右边的点\r\n dataset=[]\r\n for polyline in l:\r\n newpolyline=[]\r\n for pos in polyline:\r\n pos_x=pos[0]\r\n pos_y=pos[1]\r\n if abs((abs(pos_x)-globalconfig.X_LENGTH/2))<0.01: \r\n if pos_x>0: #judge if the pos is on the origin outline,if on outline,will be moved to the new enlarged outline and plus an extene length\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+(abs(pos_x)/pos_x*globalconfig.X_EXTENDED_LENGTH)+x_offset\r\n else:\r\n pos_x=pos[0]/globalconfig.CENTER_RATIO+x_offset \r\n else:\r\n pos_x=pos[0]/ratio+x_offset\r\n if abs((abs(pos_y)-globalconfig.Y_LENGTH/2))<0.01:\r\n pos_y=pos[1]/globalconfig.CENTER_RATIO+(abs(pos_y)/pos_y*globalconfig.Y_EXTENDED_LENGTH)+y_offset\r\n else:\r\n pos_y=pos[1]/ratio+y_offset \r\n newpolyline.append([pos_x,pos_y])\r\n dataset.append(newpolyline)\r\n return dataset", "def build_model(self, X: pd.DataFrame, y: pd.DataFrame = None) -> pm.Model:\n idx = X.index\n \n if y is None:\n y = pd.Series(0, index=idx)\n elif self.oversample: # only if y is given\n n_pos = (y == 1).sum()\n n_neg = (y == 0).sum()\n to_add = int(np.ceil(n_neg/n_pos) - 1)\n # print(n_pos, n_neg, to_add)\n if to_add > 4:\n to_add = 4\n for i in range(to_add):\n idx = idx.append(y[y==1].index)\n X = X.loc[idx]\n y = y.loc[idx]\n \n A = X[self.v_known + self.v_oob_bio]\n B_vals = X[self.v_fuzzy]\n B_mask = (B_vals == -1).astype(int)\n C_raw = X[self.v_float_adm + self.v_float_bio]\n # C_scaled = (C_raw - self.C_mean_) / self.C_std_ \n C_scaled = np.log1p(C_raw/self.C_mean_)\n C_scaled[~np.isfinite(C_scaled)] = np.nan\n C_vals = C_scaled.fillna(0)\n C_mask = C_scaled.isnull().astype(int)\n \n coords = {\"idx\": idx, \"a\": A.columns, \"b\": B_vals.columns, \"c\": C_vals.columns}\n with pm.Model(coords=coords) as m:\n pm.Data(\"A\", A, dims=[\"idx\", \"a\"])\n pm.Data(\"B_vals\", B_vals, dims=[\"idx\", \"b\"])\n pm.Data(\"B_mask\", B_mask, dims=[\"idx\", \"b\"])\n pm.Data(\"C_vals\", C_vals, dims=[\"idx\", \"c\"])\n pm.Data(\"C_mask\", C_mask, dims=[\"idx\", \"c\"])\n pm.Data(\"y\", y, dims=[\"idx\"])\n\n pm.Normal(\"avg\", mu=0, sd=1)\n\n pm.Beta(\"h_a_incl\", alpha=1, beta=4)\n pm.Normal(\"a_coef_raw\", mu=0, sd=1, dims=[\"a\"])\n pm.Bernoulli(\"a_incl\", p=m[\"h_a_incl\"], dims=[\"a\"])\n pm.Deterministic(\"a_coef\", m['a_coef_raw'] * m['a_incl'], dims=[\"a\"])\n \n pm.Normal(\"b_vals_coef\", mu=0, sd=1, dims=[\"b\"])\n 
pm.Normal(\"b_mask_coef_raw\", mu=0, sd=1, dims=[\"b\"])\n pm.Beta(\"h_b_mask_incl\", alpha=1, beta=4)\n pm.Bernoulli(\"b_mask_incl\", p=m[\"h_b_mask_incl\"], dims=[\"b\"])\n pm.Deterministic(\"b_mask_coef\", m['b_mask_coef_raw'] * m['b_mask_incl'], dims=[\"b\"])\n \n pm.Normal(\"c_vals_coef\", mu=0, sd=1, dims=[\"c\"])\n pm.Normal(\"c_mask_coef_raw\", mu=0, sd=1, dims=[\"c\"])\n pm.Beta(\"h_c_mask_incl\", alpha=1, beta=4)\n pm.Bernoulli(\"c_mask_incl\", p=m[\"h_c_mask_incl\"], dims=[\"c\"])\n pm.Deterministic(\"c_mask_coef\", m['c_mask_coef_raw'] * m['c_mask_incl'], dims=[\"c\"])\n unprob = pm.Deterministic(\n \"logit\",\n m['avg']\n + tt.dot(m[\"A\"], m[\"a_coef\"])\n + tt.dot(m[\"B_vals\"] * (1 - m['B_mask']), m[\"b_vals_coef\"])\n + tt.dot(m[\"B_mask\"], m[\"b_mask_coef\"])\n + tt.dot(m[\"C_vals\"] * (1 - m['C_mask']), m[\"c_vals_coef\"])\n + tt.dot(m[\"C_mask\"], m[\"c_mask_coef\"])\n )\n pm.Bernoulli(\"y_pred\", p = tt.nnet.sigmoid(unprob), dims=['idx'], observed=m['y'])\n\n m.graph = pm.model_to_graphviz()\n\n return m", "def process_model(self, X, U):\n x = U[0] * sym.cos(X[2])\n y = U[0] * sym.sin(X[2])\n th = U[1]*self.offset_comp - self.bias_w\n return sym.Matrix([[x], [y], [th]])", "def create_predictors(y): # pragma: no cover\n lags = y[-1:-4:-1]\n\n return lags" ]
[ "0.55294734", "0.5208984", "0.5130402", "0.50781065", "0.50344014", "0.501487", "0.49891034", "0.49852586", "0.49625823", "0.49371865", "0.49345776", "0.49325863", "0.49302906", "0.49096087", "0.48742875", "0.48697242", "0.48597276", "0.48429585", "0.4842753", "0.4841103", "0.48193973", "0.481491", "0.48138377", "0.47920746", "0.4775954", "0.47670493", "0.47620684", "0.47592813", "0.47566742", "0.474412" ]
0.5698405
0
Set the processing parameters for the dataset.
def set_dataset_processing_params( self, edge_links: Union[np.ndarray, List[Tuple[int, int]]], edge_colors: Union[np.ndarray, List[Tuple[int, int, int]]], keypoint_colors: Union[np.ndarray, List[Tuple[int, int, int]]], image_processor: Optional[Processing] = None, conf: Optional[float] = None, ) -> None: self._edge_links = edge_links or self._edge_links self._edge_colors = edge_colors or self._edge_colors self._keypoint_colors = keypoint_colors or self._keypoint_colors self._image_processor = image_processor or self._image_processor self._default_nms_conf = conf or self._default_nms_conf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processing(self, processing):\n\n self._processing = processing", "def set_parameters(self):\n\n if self.model_with_set_params:\n return\n\n self._model_with_set_params = self._parameter_values.process_model(\n self._unprocessed_model, inplace=False\n )\n self._parameter_values.process_geometry(self.geometry)\n self.model = self._model_with_set_params", "def set_processing_attrs(self,\n image_data_generator,\n target_size,\n color_mode,\n data_format,\n interpolation,\n tfrecord,\n num_copies):\n self.image_data_generator = image_data_generator\n self.target_size = tuple(target_size)\n if color_mode not in {'rgb', 'rgba', 'grayscale'}:\n raise ValueError('Invalid color mode:', color_mode,\n '; expected \"rgb\", \"rgba\", or \"grayscale\".')\n self.color_mode = color_mode\n self.data_format = data_format\n if self.color_mode == 'rgba':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (4,)\n else:\n self.image_shape = (4,) + self.target_size\n elif self.color_mode == 'rgb':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (3,)\n else:\n self.image_shape = (3,) + self.target_size\n else:\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (1,)\n else:\n self.image_shape = (1,) + self.target_size\n\n self.interpolation = interpolation\n self.tfrecord = tfrecord\n self.num_copies = num_copies", "def preprocessing(self, preprocessing):\n\n self._preprocessing = preprocessing", "def preprocessing_pipeline(self):\n self.__multilabel_processing()\n self.__split_dataset()\n self.__save_datasets()", "def set_parameters(self, mode, data):\n if mode == 'design' or self.local_design:\n self.new_design = True\n\n for key, dc in self.variables.items():\n if isinstance(dc, dc_cp):\n if ((mode == 'offdesign' and not self.local_design) or\n (mode == 'design' and self.local_offdesign)):\n self.get_attr(key).design = data[key]\n\n else:\n self.get_attr(key).design = np.nan", "def set_params(self):\n raise NotImplementedError", "def prepare_process(self, dataset):\n if dataset is not None:\n pass", "def updateData(self, *args):\n # if self.move_next_option == \"R\":\n # self.restSampling()\n # elif self.move_next_option == \"A\":\n # self.addExtra()\n # else:\n # self.continueReview()\n for name, value in self.parameter_inputs.items():\n self.parameters[name] = value.value\n # directly change the value of class variables\n logMsg((\"update settings: \", self.ml_classifier_cls, name, value.value))\n setattr(self.ml_classifier_cls, name, value.value)\n\n pass", "def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()", "def set_params(self):\r\n pass", "def set_params(self, **params):\n if('threshold' in params.keys()):\n self.threshold = params['threshold']\n if('subsample' in params.keys()):\n self.subsample = params['subsample']\n if('estimator' in params.keys()):\n self.estimator = params['estimator']\n if('n_folds' in params.keys()):\n self.n_folds = params['n_folds']\n if('stratify' in params.keys()):\n self.stratify = params['stratify']\n if('random_state' in params.keys()):\n self.random_state = params['random_state']\n if('n_jobs' in params.keys()):\n self.n_jobs = params['n_jobs']", "def setProcessingPreferences(self, processingPreferences):\n self.PDFreactorConfiguration.in1[\"processingPreferences\"] = processingPreferences", "def set_data(self, dataset):\n if dataset is not None:\n self.infoa.setText('%d instances in input data set' % len(dataset))\n 
self.infob.setText('%d attributes in input data set' % len(dataset.domain.attributes))\n # Limited the batch size between 0.005 to 0.025, in\n # order tk=o make training fats and also accurate\n if(len(dataset) >= 200):\n self.batchsize = int(0.005 * len(dataset))\n self.batch_spin.setMinimum(int(0.005 * len(dataset)))\n self.batch_spin.setMaximum(int(0.025 * len(dataset)))\n else:\n # here the dataset is to small, hence fixed the\n # batch size programmatically\n self.batchsize = 1\n self.batch_spin.setMinimum(1)\n self.batch_spin.setMaximum(10)\n self.optionsBox.setDisabled(False)\n self.layerBox.setDisabled(False)\n self.updateLayer()\n self.dataset = dataset\n self.save_button.setDisabled(True)\n\n else:\n self.infoa.setText('No data on input yet, waiting to get something.')\n self.infob.setText('')\n self.optionsBox.setDisabled(True)\n self.layerBox.setDisabled(True)\n self.dataset = None", "def setParameters(self) -> None:\n # get a list of the header and data files in the folder\n self.headerF = glob.glob(os.path.join(self.dataPath, \"*.XTR\"))\n if len(self.headerF) == 0:\n self.headerF = glob.glob(os.path.join(self.dataPath, \"*.XTRX\"))\n self.dataF = glob.glob(os.path.join(self.dataPath, \"*.RAW\"))\n # data byte information might be different for each file\n # so it is a dictionary\n self.dataByteOffset: Dict = {}\n self.recChannels = {}\n self.dataByteSize = 4\n # data type\n self.dtype = np.float32\n # get the number of data files and header files - this should be equal\n self.numHeaderFiles: int = len(self.headerF)\n self.numDataFiles: int = len(self.dataF)", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def preprocess_dataset(self, dataset, params=None):\n if params is None:\n assert self.params_loaded, (\n \"You must either provide parameters or load the model params before preprocessing.\")\n params = self.params\n for key in dataset.keys():\n if dataset[key] is None:\n continue\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\"):\n if params.whiten_method == \"FT\": # other methods require patching first\n if hasattr(params, \"whiten_batch_size\"):\n batch_size = params.whiten_batch_size\n else:\n batch_size = None\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data_batch(dataset[key].images, method=params.whiten_method,\n batch_size=batch_size)\n print(\"INFO:preprocessing:FT Whitened \"+key+\" data\")\n if hasattr(params, \"lpf_data\") and params.lpf_data:\n dataset[key].images, dataset[key].data_mean, dataset[key].lpf_filter = \\\n dp.lpf_data(dataset[key].images, cutoff=params.lpf_cutoff)\n print(\"INFO:preprocessing:Low pass filtered \"+key+\" data\")\n if hasattr(params, \"contrast_normalize\") and params.contrast_normalize:\n if hasattr(params, \"gauss_patch_size\"):\n dataset[key].images = dp.contrast_normalize(dataset[key].images,\n params.gauss_patch_size)\n else:\n dataset[key].images = dp.contrast_normalize(dataset[key].images)\n print(\"INFO:preprocessing:Contrast normalized \"+key+\" data\")\n if hasattr(params, \"standardize_data\") and params.standardize_data:\n if params.data_type == \"mnist\":\n eps = 1e-5\n else:\n eps = None\n dataset[key].images, dataset[key].data_mean, dataset[key].data_std = \\\n dp.standardize_data(dataset[key].images, eps)\n self.data_mean = dataset[key].data_mean\n self.data_std = dataset[key].data_std\n print(\"INFO:preprocessing:Standardized \"+key+\" 
data\")\n if hasattr(params, \"extract_patches\") and params.extract_patches:\n assert all(key in params.__dict__.keys()\n for key in [\"num_patches\", \"patch_edge_size\", \"overlapping_patches\",\n \"randomize_patches\"]), (\"Insufficient params for patches.\")\n out_shape = (int(params.num_patches), int(params.patch_edge_size),\n int(params.patch_edge_size), dataset[key].num_channels)\n dataset[key].num_examples = out_shape[0]\n dataset[key].reset_counters()\n if hasattr(params, \"patch_variance_threshold\"):\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, params.randomize_patches,\n params.patch_variance_threshold, dataset[key].rand_state)\n else:\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, params.randomize_patches,\n var_thresh=0, rand_state=dataset[key].rand_state)\n dataset[key].shape = dataset[key].images.shape\n dataset[key].num_rows = dataset[key].shape[1]\n dataset[key].num_cols = dataset[key].shape[2]\n dataset[key].num_channels = dataset[key].shape[3]\n dataset[key].num_pixels = np.prod(dataset[key].shape[1:])\n print(\"INFO:preprocessing:Extracted patches from \"+key+\" data\")\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\") and params.whiten_method != \"FT\":\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data(dataset[key].images, method=params.whiten_method)\n print(\"INFO:preprocessing:Whitened \"+key+\" data\")\n if hasattr(params, \"norm_data\") and params.norm_data:\n dataset[key].images, dataset[key].data_max = dp.normalize_data_with_max(dataset[key].images)\n self.data_max = dataset[key].data_max\n print(\"INFO:preprocessing:Normalized \"+key+\" data with maximum\")\n if hasattr(params, \"rescale_data\") and params.rescale_data:\n dataset[key].images, dataset[key].data_min, dataset[key].data_max = dp.rescale_data_to_one(dataset[key].images)\n self.data_max = dataset[key].data_max\n self.data_min = dataset[key].data_min\n print(\"INFO:preprocessing:Rescaled each \"+key+\" datapoint to one\")\n if hasattr(params, \"center_data\") and params.center_data:\n dataset[key].images, dataset[key].data_mean = dp.center_data(dataset[key].images,\n use_dataset_mean=True)\n self.data_mean = dataset[key].data_mean\n print(\"INFO:preprocessing:Centered \"+key+\" data\")\n return dataset", "def set_parameters(self, params):\n self.kp = params.pgain", "def SetVariationalParameters(self, data):\n self._SetParameters(data, 'SetVariationalParameters')", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def setParams(self, paramSet):\r\n pass", "def preprocess(\n self,\n dataset: Union[str, dict, pd.DataFrame] = None,\n training_set: Union[str, dict, pd.DataFrame] = None,\n validation_set: Union[str, dict, pd.DataFrame] = None,\n test_set: Union[str, dict, pd.DataFrame] = None,\n training_set_metadata: Union[str, dict] = None,\n data_format: str = None,\n skip_save_processed_input: bool = True,\n random_seed: int = default_random_seed,\n **kwargs,\n ) -> PreprocessedDataset:\n print_boxed(\"PREPROCESSING\")\n\n for callback in self.callbacks:\n callback.on_preprocess_start(self.config_obj.to_dict())\n\n preprocessing_params = get_preprocessing_params(self.config_obj)\n\n proc_training_set = proc_validation_set = proc_test_set = None\n try:\n with provision_preprocessing_workers(self.backend):\n # TODO (Connor): Refactor to use 
self.config_obj\n preprocessed_data = preprocess_for_training(\n self.config_obj.to_dict(),\n dataset=dataset,\n training_set=training_set,\n validation_set=validation_set,\n test_set=test_set,\n training_set_metadata=training_set_metadata,\n data_format=data_format,\n skip_save_processed_input=skip_save_processed_input,\n preprocessing_params=preprocessing_params,\n backend=self.backend,\n random_seed=random_seed,\n callbacks=self.callbacks,\n )\n\n (proc_training_set, proc_validation_set, proc_test_set, training_set_metadata) = preprocessed_data\n\n return PreprocessedDataset(proc_training_set, proc_validation_set, proc_test_set, training_set_metadata)\n except Exception as e:\n raise RuntimeError(f\"Caught exception during model preprocessing: {str(e)}\") from e\n finally:\n for callback in self.callbacks:\n callback.on_preprocess_end(proc_training_set, proc_validation_set, proc_test_set, training_set_metadata)", "def set_parameters(self, new_param):\n\n current_idx = 0\n for idx, param in enumerate(self.__network.parameters()):\n temp_param = \\\n new_param[current_idx:current_idx + self.__net_sizes[idx]]\n temp_param = temp_param.reshape(self.__net_shapes[idx])\n param.data = tr.from_numpy(temp_param).float()\n current_idx += self.__net_sizes[idx]", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"region_size\" in vars(self):\n self.region_size = 0.08\n if not \"RGB_bands\" in vars(self):\n self.RGB_bands = [\"B4\",\"B3\",\"B2\"]\n if not \"split_RGB_images\" in vars(self):\n self.split_RGB_images = True\n # in PROCESSED dir we expect RGB. NDVI, BWNDVI\n self.num_files_per_point = 3", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def setup(self, stage=None):\n self.data_train, self.data_val, self.data_test = [None] * 3", "def _set_parameter(self):\n # Get parameter keys\n self.input_parameter = self.parameter_combobox.currentText()\n self.result_parameter = self.result_parameters[self.input_parameter]\n # Adjust axes labels\n self.ax.set_xlabel('{} steunpunt'.format(self.input_parameter))\n self.ax.set_ylabel('{} uitvoerlocatie'.format(self.input_parameter))\n # Set data\n self._set_data()", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass" ]
[ "0.66091186", "0.65634745", "0.63424504", "0.62893957", "0.6289173", "0.6258113", "0.62301064", "0.61419135", "0.61234295", "0.6097829", "0.608046", "0.60428184", "0.59977686", "0.5968489", "0.5958951", "0.59205025", "0.5912154", "0.5910207", "0.59096277", "0.58788115", "0.58593255", "0.5856571", "0.58362156", "0.58182776", "0.5787888", "0.57828236", "0.5779724", "0.5774364", "0.5774364", "0.5774364" ]
0.7022556
0
``threshold = max(end_threshold, threshold step)`` (linear) ``threshold = max(end_threshold, step threshold)`` (exponential) where the initial threshold is set to ``start_threshold``.
def __init__(self, start_threshold, end_threshold, step, method="linear"): if start_threshold < 0 or end_threshold < 0 or step < 0: raise ValueError("Thresholds must be positive.") if start_threshold < end_threshold: raise ValueError("Start threshold must be bigger than end " "threshold.") if method == "exponential" and step > 1: raise ValueError("For exponential updating, the step parameter " "must not be explosive.") self._start_threshold = start_threshold self._end_threshold = end_threshold self._step = step self._method = method self._threshold = start_threshold
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def power_e(amount, start, stop, truncated, sequence):\n ratio = .5\n for x in range(start, amount):\n y = abs(round(ratio * math.exp(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def calc_eps(self, steps):\n eps_threshold = self.EPS_END + (self.EPS_START - self.EPS_END) * \\\n math.exp(-1. * steps / self.EPS_DECAY)\n return eps_threshold", "def power_em1(amount, start, stop, truncated, sequence):\n ratio = .25\n for x in range(start, amount):\n y = abs(round(ratio * math.expm1(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def test_threshold_range_b(self):\n code, out, err = self.t.runError(\"--threshold --min 3.2 --max 3.1\")\n self.assertIn(\"The max value must be higher than the min value.\", out)", "def test_threshold_range_a(self):\n code, out, err = self.t.runError(\"--threshold --max 3.1 --min 3.2\")\n self.assertIn(\"The min value must be lower than the max value.\", out)", "def test_exp_after_log_control(self, space, base_point, end_point, atol):\n space.equip_with_metric(self.Metric)\n expected = end_point\n tangent_vec = space.metric.log(expected, base_point)\n end_point = space.metric.exp(tangent_vec, base_point)\n result = end_point\n self.assertAllClose(result, expected, atol=atol)", "def power(amount, start, stop, truncated, sequence):\n ratio = len(str(start)) + 1\n for x in range(start, amount):\n y = abs(round(ratio ** x))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def log(amount, start, stop, truncated, sequence):\n ratio = 10 ** (len(str(start)) + 1)\n for x in range(start, amount):\n # y = abs(round(math.log(x, 1)))\n y = abs(round(math.log1p(x) * ratio * 5))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def linear_fade(start, end, amount):\n level = start + (amount * (end - start))\n return level", "def altitude_range(self, start_distance, finish_distance):\n start_index, start_ratio = calculate_value_reference(self.distances,\n start_distance)\n start_alt = calculate_value(self.altitudes, start_index, start_ratio)\n finish_index, finish_ratio = calculate_value_reference(self.distances,\n finish_distance)\n finish_alt = calculate_value(self.altitudes, finish_index, finish_ratio)\n\n min_alt = min(start_alt, finish_alt)\n max_alt = max(start_alt, finish_alt)\n\n if start_index < finish_index:\n altitudes = self.altitudes[start_index + 1: finish_index + 1]\n min_alt = min(min_alt, altitudes.min())\n max_alt = max(max_alt, altitudes.max())\n\n return min_alt, max_alt", "def limit_timestep(weights, elocnew, elocold, eref, start, stop):\n if start is None or stop is None:\n return 1\n assert (\n stop > start\n ), \"stabilize weights requires stop>start. 
Invalid stop={0}, start={1}\".format(\n stop, start\n )\n eloc = np.stack([elocnew, elocold])\n fbet = np.amax(eref - eloc, axis=0)\n return np.clip((1 - (fbet - start)) / (stop - start), 0, 1)", "def upper_bound(self) -> float:\n ...", "def logspace(start,stop,num=50,endpoint=True,base=10.0):\n y = linspace(start,stop,num=num,endpoint=endpoint)\n return _nx.power(base,y)", "def _lerp(self, start_value, end_value):\n # @todo: can probably replace this with np.interp(self.step_lerp_pcts, [0, 1], [start_value, end_value])\n return (1.0-self.step_lerp_pcts)*start_value + self.step_lerp_pcts*end_value", "def calculate_slider_step(\n min_value: float, max_value: float, steps: int = 100\n) -> float:\n\n return 10 ** math.floor(math.log10((max_value - min_value) / steps))", "def safe_exp(w, thresh):\n\n slope = np.exp(thresh)\n with tf.variable_scope('safe_exponential'):\n lin_region = tf.to_float(w > thresh)\n\n lin_out = slope*(w - thresh + 1.)\n exp_out = tf.exp(w)\n\n out = lin_region*lin_out + (1.-lin_region)*exp_out\n return out", "def fit_to_range(val: float, a: float, b: float, a1: float, b1: float) -> float:\n new_value = ((val - a) / (b - a)) * (b1 - a1) + a1\n return new_value", "def exponential_decay(param: float, decay_factor: float, min_val: float) -> float:\n if param > min_val:\n param *= decay_factor\n param = max(param, min_val)\n return param", "def get_exponential_detection_thresholds():\n \n m = utils.MAX_DETECTION_THRESHOLD\n n = utils.NUM_DETECTION_THRESHOLDS\n y = np.exp(np.log(m) / n)\n return y ** np.arange(1, n + 1)", "def __init__(self, threshold: float = 0.3, initial_val: float = 0.0) -> None:\n self.threshold = threshold\n self.initial_val = initial_val", "def lower_bound(self) -> float:\n ...", "def linlogspace(start, stop, base=0.5, **kwargs) -> np.ndarray:\n p = (1 - np.logspace(0, 1, base=base, **kwargs)) * (1 / (1 - base))\n return (1 - p) * start + p * stop", "def safe_exp(w, thresh):\n\n slope = np.exp(thresh)\n with tf.variable_scope('safe_exponential'):\n lin_region = tf.to_float(w > thresh)\n\n lin_out = slope*(w - thresh + 1.)\n exp_out = tf.exp(w)\n\n out = lin_region*lin_out + (1.-lin_region)*exp_out\n return out", "def exponential_interval(\n initial: float = 0.1,\n multiplier: float = 2,\n maximum: Optional[float] = None,\n minimum: Optional[float] = None,\n) -> Generator[float, None, None]:\n val = initial\n while True:\n if minimum is not None and val < minimum:\n yield minimum\n if maximum is not None and val > maximum:\n yield maximum\n else:\n yield val\n val *= multiplier", "def find_best_point(self, start_i, end_i, ranges):\n max_val = 0\n target = start_i\n for i in range(start_i, end_i):\n if ranges[i] > max_val:\n target = i\n max_val = ranges[i]\n \n angle = -(540-target)*3\n return float(angle)/1080, target", "def set_progress_range(self, maximum):\r\n\r\n pass", "def get_epsilon_decay_function(e_start, e_end, decay_duration):\n return lambda frame_idx: e_end + \\\n (e_start - e_end) * np.exp(-1. 
* frame_idx / decay_duration)", "def logistic(x_0, max_value, midpoint, steepness):\n return max_value / (1 + math.exp(-(x_0 - midpoint) / steepness))", "def ticks(self, start, end, desired_ticks=8):\n if start > end:\n start, end = end, start\n\n if start == 0.0:\n # Whoever calls us with a value of 0.0 puts themselves at our mercy\n log_start = 1e-9\n else:\n log_start = log10(start)\n\n if end == 0.0:\n log_end = 1e-9\n else:\n log_end = log10(end)\n log_interval = log_end - log_start\n\n if log_interval < 1.0:\n # If the data is spaced by less than a factor of 10, then use\n # regular/linear ticking\n min, max, delta = heckbert_interval(start, end, desired_ticks,\n enclose=True)\n return frange(min, max, delta)\n\n elif log_interval < desired_ticks:\n magic_numbers = [1, 2, 5]\n for interval in magic_numbers:\n n1 = self._logtickceil_as_irep(start,interval)\n n2 = self._logtickfloor_as_irep(end,interval)\n ticks = [self._irep_to_value(n,interval) for n in range(n1,n2+1)]\n if len(ticks) < desired_ticks * 1.5:\n return ticks\n return ticks\n\n else:\n # Put lines at every power of ten\n startlog = ceil(log_start)\n endlog = floor(log_end)\n expticks = linspace(startlog, endlog, endlog - startlog + 1)\n return 10**expticks", "def _mask_for_values_between_exponent_limits(self):\n mask_between_exp_limits = None\n new_exp_lower = new_exp_upper = None\n if self.exp_lower is not None:\n new_exp_lower = self.exp_lower + 1\n if self.exp_upper is not None:\n new_exp_upper = self.exp_upper - 1\n if (new_exp_lower is not None) and (new_exp_upper is not None):\n if new_exp_lower <= new_exp_upper:\n mask_between_exp_limits = self.exponent.interval_mask(new_exp_lower, new_exp_upper)\n else:\n mask_between_exp_limits = self.exponent.interval_mask(new_exp_lower, new_exp_upper)\n\n return mask_between_exp_limits" ]
[ "0.6318416", "0.5703446", "0.5681435", "0.5562579", "0.5382603", "0.537008", "0.5349749", "0.5336802", "0.5329492", "0.53214836", "0.5306692", "0.5306511", "0.52809584", "0.5261724", "0.5236843", "0.52299845", "0.52246624", "0.52062243", "0.5202483", "0.51860684", "0.5146353", "0.5130747", "0.51279795", "0.51259506", "0.51196027", "0.5116174", "0.50982386", "0.5085933", "0.50738436", "0.50431305" ]
0.6057787
1
Assert equal with better error message.
def assert_eq(a, b, msg=None): assert a == b, msg or __safe_error("!=", a, b)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assert_equal(self, first, second, msg=\"\"):\r\n assert first == second", "def assert_equal(self, first, second):\n if first != second:\n raise AssertionError('%s and %s not equal' % (str(first), str(second)))", "def _baseAssertEqual(self, first, second, msg=None):\r\n if not first == second:\r\n standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))\r\n msg = self._formatMessage(msg, standardMsg)\r\n raise self.failureException(msg)", "def verify_equal(self, first, second, msg=\"\"):\r\n try:\r\n self.assert_equal(first, second, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def assert_not_equal(self, first, second, msg=\"\"):\r\n assert first != second", "def test_equal(self):\n self.assertTrue(self.a == self.a)\n self.assertFalse(self.a != self.a)", "def assert_equal(left, right):\n msg = \"{} != {}\".format(left, right)\n assert left == right, msg", "def eq_(a, b, msg=None):\n if not a == b:\n raise AssertionError(msg or \"%r != %r\" % (a, b))", "def eq_(a, b, msg=None):\n assert a == b, msg or \"%r != %r\" % (a, b)", "def eq_(a, b, msg=None):\n assert a == b, msg or \"%r != %r\" % (a, b)", "def eq_(a, b, msg=None):\r\n if not a == b:\r\n raise AssertionError(msg or \"%r != %r\" % (a, b))", "def eq_msg(a, b, msg=None):\n assert a == b, (str(msg) or '') + ' (%r != %r)' % (a, b)", "def testEquality(self):\n pass", "def assert_not_equal(self, first, second):\n if not first != second:\n raise AssertionError('%s and %s is equal' % (str(first), str(second)))", "def equality():\r\n\r\n Assert(1) == 1\r\n Assert(1) != 0\r\n\r\n with Assert.raises(AssertionError):\r\n Assert(1) == 0\r\n\r\n with Assert.raises(AssertionError):\r\n Assert(1) != 1", "def assert_equals(expected,received,message=None):\n if (expected != received):\n if message is None:\n message = 'assert_equals: expected %s but instead got %s' % (repr(expected),repr(received))\n quit_with_error(message)", "def assert_verbose(actual, expected):\n assert expected == actual, f\"Expected value: {expected}. But actual value is {actual}\"", "def verify_not_equal(self, first, second, msg=\"\"):\r\n try:\r\n self.assert_not_equal(first, second, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def expected_value(expected, actual):\n assert expected == actual", "def equality():\n\n Assert(1) == 1\n Assert(1) != 0\n\n with Assert.raises(AssertionError):\n Assert(1) == 0\n\n with Assert.raises(AssertionError):\n Assert(1) != 1", "def _check(self, expected, actual):\n\n assert expected == actual, 'Assert fail. 
expected={} but actual={}'.format(expected, actual)", "def assertEqual(self, first, second, msg=None):\r\n assertion_func = self._getAssertEqualityFunc(first, second)\r\n assertion_func(first, second, msg=msg)", "def eq_(thing1, thing2, msg = None):\n return nose.tools.eq_(thing1, thing2,\n msg = (msg if msg != None else \"'%s' != '%s'\" % (thing1, thing2))\n )", "def assertNotEqual(self, first, second, msg=None):\r\n if not first != second:\r\n msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first), \r\n safe_repr(second)))\r\n raise self.failureException(msg)", "def test_eq_invalid(self):\n self.assertFalse(self.instance == '123')", "def testEqual(a, b):\n if a == b:\n print('Pass')\n else:\n print('Fail')", "def test_eq(self):\n\n self.assertEqual(\n description.BaseDescription('/path/to/local'),\n description.BaseDescription('/path/to/local'),\n 'equality between two descriptions'\n )\n\n self.assertNotEqual(\n description.BaseDescription('/path/to/local/a'),\n description.BaseDescription('/path/to/local/b'),\n 'inequality between two descriptions'\n )", "def assertOutput(cls, expected, actual):\n if expected != actual:\n raise Exception(\"'\" + expected + \"' != '\" + actual + \"'\")", "def test_almost_equal(self):\n x = Point(\n lat=23.4,\n lng=23.1,\n author=self.u\n )\n self.assertTrue(self.a == x)\n self.assertFalse(self.a != x)", "def test_eq(self):\n dummy = DummyCryptographicObject()\n self.assertTrue(dummy == dummy)" ]
[ "0.8381916", "0.7973827", "0.7879367", "0.7826144", "0.7825422", "0.76669437", "0.7661126", "0.7659392", "0.7640886", "0.7640886", "0.76360196", "0.7526258", "0.7519615", "0.73260635", "0.7317758", "0.7315744", "0.72970676", "0.7290986", "0.7287053", "0.7247238", "0.7233387", "0.72058976", "0.70207494", "0.69481885", "0.69236356", "0.692189", "0.6912927", "0.6875933", "0.6845397", "0.6839174" ]
0.8015735
1
Raise a type error if obj is not an instance of cls.
def assert_type( obj, cls, msg="{obj} ({obj!r}) should be a {cls}, not {objcls}" ): if not isinstance(obj, cls): raise TypeError(msg.format(obj=obj, objcls=type(obj), cls=cls))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assert_is_instance(self, obj, cls, msg=\"\"):\r\n assert isinstance(obj, cls)", "def assert_is_not_instance(self, obj, cls, msg=\"\"):\r\n assert not isinstance(obj, cls)", "def assertIsInstance(self, obj, cls, msg=None):\r\n if not _is_instance(obj, cls):\r\n standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)\r\n self.fail(self._formatMessage(msg, standardMsg))", "def verify_is_instance(self, obj, cls, msg=\"\"):\r\n try:\r\n self.assert_is_instance(obj, cls, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def __instancecheck__(self, obj: object) -> NoReturn:\n raise TypeError(\"isinstance() argument 2 cannot be a \"\n \"parameterized generic\")", "def check_class(instance, type):\n\tif not issubclass(instance, type):\n\t\traise TypeError('Subclass expected type {0}, but got: {1}', type(type), type(instance))", "def _validate_type(self, name, obj, *args):\n if obj is None:\n return\n for arg in args:\n if isinstance(obj, arg):\n return\n raise TypeError(self.__class__.__name__ + '.' + name + ' is of type ' + type(obj).__name__ +\n '. Must be equal to None or one of the following types: ' + str(args))", "def verify_is_not_instance(self, obj, cls, msg=\"\"):\r\n try:\r\n self.assert_is_not_instance(obj, cls, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def _isinstance(self, obj, raise_error=True):\n rv = isinstance(obj, self.__model__)\n if not rv and raise_error:\n raise ValueError('%s is not of type %s' % (obj, self.__model__))\n return rv", "def verify_type(self, obj):\n return isinstance(obj, self.type_)", "def assertNotIsInstance(self, obj, cls, msg=None):\r\n if _is_instance(obj, cls):\r\n standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)\r\n self.fail(self._formatMessage(msg, standardMsg))", "def issubclass(obj, cls):\r\n if isinstance(obj, Assert):\r\n obj = obj.obj\r\n return assert_(issubclass(obj, cls),\r\n 'not issubclass(%s, %s)' % (_repr(obj), _repr(cls)))", "def check_type(instance, type):\n\tif not isinstance(instance, type):\n\t\traise TypeError('Instance expected type {0}, but got: {1}', type(type), type(instance))", "def test_from_object_fail(self):\n class InvalidClass(object):\n pass\n Invalid_object = InvalidClass()\n with self.assertRaises(TypeError):\n BaseDataClass.from_object(Invalid_object)", "def _validate_type_not_null(self, name, obj, *args):\n for arg in args:\n if isinstance(obj, arg):\n return\n raise TypeError(self.__class__.__name__ + '.' + name + ' is of type ' + type(obj).__name__ +\n '. 
Must be one of the following types: ' + str(args))", "def isinstancemethod(cls, obj):\n return _isinstancemethod(cls, obj)", "def test_isinstance(self):\n obj = BaseModel()\n self.assertIsInstance(obj, BaseModel)", "def verifyObject(interfaceCls, obj):\r\n try:\r\n verify.verifyObject(interfaceCls, obj)\r\n except Invalid as e:\r\n raise InterfaceError('Verification of the object of type \"{0}\" '\r\n 'failed: {1}'.format(obj.__class__.__name__, e))", "def assert_type(instance, classtype):\n assert_cond(isinstance(instance, classtype), TypeCheckError(type(instance), classtype))", "def _isinstance(self, instance, raise_error=True):\n\n if isinstance(instance, self.__model__):\n return True\n elif raise_error:\n raise ValueError('{} is not of type {}.'.format(\n instance, self.__model__,\n ))\n else:\n return False", "def __class_validation(cls):\n\n # check if this class is a subClass of Model\n if not issubclass(cls, db.Model):\n raise AttributeError(cls.__name__ + \" is not subclass of \" + db.Model.__name__)", "def check_type(obj, expected_type):\n\n class Model(pydantic.BaseModel):\n data: expected_type\n\n # convert ValidationError to TypeError if the obj does not match the expected type\n try:\n Model(data=obj)\n except pydantic.ValidationError as ve:\n raise TypeError(str(ve.errors()))\n\n return True # allow constructs like assert check_type(x, List[float])", "def test_is_instance(self):\n self.assertIsInstance(self.obj, Rectangle, \"created obj is not an \" +\n \"instance of Rectangle class.\")", "def validate(self, object, name, value):\n raise TraitError(\n \"The '%s' trait of %s instance has an unknown type. \"\n \"Contact the developer to correct the problem.\"\n % (name, class_of(object))\n )", "def match(self, cls):\n return isinstance(self, cls)", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False", "def validate_object_init_type_hint(obj: object,\n log_metadata_validation_failures: bool = True\n ) -> None:\n if not obj or not type(obj).__init__:\n return\n type_hint_dict = get_type_hints(type(obj).__init__)\n if not isinstance(type_hint_dict, dict):\n return\n for var_name, instance_type in type_hint_dict.items():\n generic_alias_map = {List: list, Dict: dict, list: list, dict: dict}\n if hasattr(instance_type, \"__origin__\"):\n if instance_type.__origin__ is Union:\n # optional argument, [1] is None\n instance_type = instance_type.__args__[0]\n if hasattr(instance_type, \"__origin__\"):\n instance_type = generic_alias_map[instance_type.__origin__]\n else:\n instance_type = generic_alias_map[instance_type.__origin__]\n if hasattr(obj, var_name):\n var = getattr(obj, var_name)\n validate_is_instance(var, var_name, instance_type,\n type(obj).__name__, log_metadata_validation_failures)", "def instance_of(cls):\n def check(value):\n return (\n isinstance(value, cls),\n u\"{value!r} is instance of {actual!s}, required {required!s}\".format(\n value=value,\n actual=fullyQualifiedName(type(value)),\n required=fullyQualifiedName(cls),\n ),\n )\n return check", "def CheckType(self, *args, **kwargs):\n pass", "def assert_isinstance(object, class_or_tuple):\n assert isinstance(\n object, class_or_tuple\n ), \"unexpected instance type, expected=%s, actual=%s\" % (\n class_or_tuple,\n type(object),\n )\n return object" ]
[ "0.8022944", "0.7873061", "0.7360076", "0.7278757", "0.7261065", "0.7160888", "0.70439845", "0.7019144", "0.6977047", "0.6889601", "0.6796127", "0.67910004", "0.6718902", "0.6682022", "0.65874743", "0.6350729", "0.62812877", "0.6236786", "0.62262434", "0.6214717", "0.6192635", "0.6140967", "0.6138299", "0.61142945", "0.6094619", "0.6083798", "0.60531485", "0.60508406", "0.60087776", "0.59971684" ]
0.7940661
1
Check all lists in a list are equal length
def assert_len_eq(lists): # Sanity check max_len = max(len(p) for p in lists) for i, p in enumerate(lists): assert len( p ) == max_len, "Length check failed!\nl[{}] has {} elements != {} ({!r})\n{!r}".format( i, len(p), max_len, p, lists )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_size_of_lists(sequence_header, secstr_header):\n if len(sequence_header) != len(sequence):\n sys.exit(\"The size of the sequence list and sequence header doesn't match\")\n else:\n return True", "def test_len(self):\n\t\t# for 2 sample lists, I test that the len of the list is the len\n\t\t# of the LinkedList that is constructed with the list.\n\t\tl1 = [1]\n\t\tself.assertEqual(len(from_list_(l1).print()), len(l1))\n\t\tl2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\n\t\tself.assertEqual(len(from_list_(l2).print()), len(l2))", "def assert_same_size(sequences):\n seq_size = len(sequences[0])\n for seq in sequences:\n if len(seq) != seq_size:\n raise SizeError", "def check_lists_equal(list_1, list_2):\n if len(list_1) != len(list_2):\n return False\n return sorted(list_1) == sorted(list_2)", "def _assert_same_length(\n list_series_1: Sequence[TimeSeries],\n list_series_2: Sequence[TimeSeries],\n):\n\n raise_if_not(\n len(list_series_1) == len(list_series_2),\n \"Sequences of series must be of the same length, found length:\"\n + f\" {len(list_series_1)} and {len(list_series_2)}.\",\n )", "def _checkSize(X1,X2):\n \n if len(X1) != len(X2):\n raise ValueError, 'Lists are differnt lengths'", "def equal_length(crime):\n length = len(crime[0])\n for i in range(len(crime)):\n boo = length == len(crime[i])\n if boo == False:\n return(False)\n break\n return(True)", "def test_list_of_equal_len():\n\n @type_checked\n def _run_test(something:[str, int, bool]):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=[None, \"12\", 1])", "def _validate_elem_length(max_num_levels, elems_flat, axis):\n assertions = []\n\n elem_length = ps.shape(elems_flat[0])[axis]\n\n # The default size limit will overflow a 32-bit int, so make sure we're\n # using 64-bit.\n size_limit = 2**(ps.cast(max_num_levels, np.int64) + 1)\n enough_levels = ps.less(ps.cast(elem_length, np.int64), size_limit)\n enough_levels_ = tf.get_static_value(enough_levels)\n if enough_levels_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n enough_levels, True,\n message='Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis=={}`.'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit)))\n elif not enough_levels_:\n raise ValueError(\n 'Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis == {}`'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit))\n\n is_consistent = ps.reduce_all([ps.equal(ps.shape(elem)[axis], elem_length)\n for elem in elems_flat[1:]])\n\n is_consistent_ = tf.get_static_value(is_consistent)\n if is_consistent_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n is_consistent, True,\n message='Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat])))\n elif not is_consistent_:\n raise ValueError(\n 'Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat]))\n return elem_length, assertions", "def check_consistent_length(y_true: List[List[str]], y_pred: List[List[str]]):\n len_true = list(map(len, y_true))\n len_pred = list(map(len, y_pred))\n is_list = set(map(type, y_true)) | set(map(type, y_pred))\n\n if len(y_true) != len(y_pred) or len_true != len_pred:\n message = 'Found input variables with inconsistent numbers 
of samples:\\n{}\\n{}'.format(len_true, len_pred)\n raise ValueError(message)", "def lists_equal_length(func):\n # Define the wrapper function.\n def wrapper(self, *args, **kwargs):\n\n # Collect all `list` objects from `args`.\n lists_args = [arg for arg in args if isinstance(arg, list)]\n # Collecgt all `list` object from `kwargs`.\n lists_kwargs = [arg for arg in kwargs.values() if isinstance(arg, list)]\n # Concatenate the lists of `list` objects.\n lists = lists_args + lists_kwargs\n\n # Check whether all the `list` objects have the same length.\n do_have_same_length = len(set(map(len, lists))) == 1\n\n # Raise an `InvalidArgumentsError` exception if there's a length\n # mismatch.\n if not do_have_same_length:\n msg_fmt = \"The argument lists must have the same length.\"\n raise InvalidArgumentsError(msg_fmt)\n\n # Simply execute the decorated method with the provided arguments\n # and return the result.\n return func(self, *args, **kwargs)\n\n return wrapper", "def listLengths(input_list):\r\n return [len(item) for item in input_list]", "def check_consistent_length(arrays: Sequence[npt.ArrayLike]) -> None:\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of\" \" samples: %r\" % [int(length) for length in lengths]\n )", "def all_equal(list_a, list_b):\n if len(list_a) != len(list_b):\n return False\n a, b = np.array(list_a), np.array(list_b)\n return all(a == b)", "def count_equal(a_list: list, b_list: list) -> None:\n a_list, b_list = sorted(a_list), sorted(b_list)\n assert len(a_list) == len(b_list)\n assert all([a == b for a, b in zip(a_list, b_list)])", "def check_equal(list1, list2):\n return len(list1) == len(list2) and sorted(list1) == sorted(list2)", "def check_values_same(lst1, lst2):\n\treturn len(lst1) == len(lst2) and sorted(lst1) == sorted(lst2)", "def is_equal(a: list[int], b: list[int]) -> bool:\n a_length: int = len(a)\n b_length: int = len(b)\n if a_length == 0 and b_length == 0:\n return True\n else:\n i = 0\n if a_length == b_length:\n if a_length <= len(b):\n while i < a_length:\n if a[i] == b[i]:\n return True\n else:\n i += 1\n return False\n else:\n while i < b_length:\n if a[i] == b[i]:\n return True\n else:\n i += 1\n return False\n else:\n return False", "def _check_values_len(self, data_batch: Dict[str, List[str]]):\n values_len = [len(v) for _, v in data_batch.items()]\n unique_len = len(set(values_len))\n assert unique_len == 1, \"Length of values are not consistent across\"", "def test_merge_list_same(short_ll, small_ll):\n assert ml(short_ll, small_ll) == 8\n assert len(small_ll) == 8", "def _all_equal(arg):\n return arg.count(arg[0]) == len(arg)", "def is_equal(a: list[int], b: list[int]) -> bool:\n if len(a) == len(b):\n i: int = 0\n num: int = 0\n while i < len(a):\n if a[i] == b[i]:\n i = i + 1\n num = num + 1\n else:\n i = i + 1\n return (num == len(a))\n else:\n return False", "def __len__(self):\n n = 1\n for valTuple in self._valListOfLists:\n n *= len(valTuple)\n return n", "def checkSameLengthFlatLists():\n # Read the flatlist into a python list.\n with open('./flatlist', 'r') as f:\n flatlist = f.readlines()\n # Read the flatdarklist into a python list.\n with open('./flatdarklist', 'r') as f:\n flatdarklist = f.readlines()\n # Check that both lists are the same length.\n if len(flatlist) != len(flatdarklist):\n # Print a nice loud warning.\n 
logging.info(\"\\n#####################################################################\")\n logging.info(\"#####################################################################\")\n logging.info(\"\")\n logging.info(\" WARNING in sort: flatlist and flatdarklist are not the same \")\n logging.info(\" length. Removing extra entries from the\")\n logging.info(\" longer list. Original lists can be found in\")\n logging.info(\" original_flatlist and original_flatdarklist.\")\n logging.info(\"\")\n logging.info(\"#####################################################################\")\n logging.info(\"#####################################################################\\n\")\n # Copy the original flatlist and flatdarklist to backup files.\n shutil.copy2('./flatlist', './original_flatlist')\n shutil.copy2('./flatdarklist', './original_flatdarklist')\n # while they are not the same length:\n while len(flatlist) != len(flatdarklist):\n # remove the last entry from the longer list.\n if len(flatlist) > len(flatdarklist):\n del flatlist[-1]\n else:\n del flatdarklist[-1]\n # Write the new flatlist to the flatlist textfile, overwriting anything already there.\n with open('./flatlist', 'w') as f:\n for item in flatlist:\n f.write(item)\n # Write the new flatdarklist to the flatdarklist textfile, overwriting anything already there.\n with open('./flatdarklist', 'w') as f:\n for item in flatdarklist:\n f.write(item)", "def lists_are_identical(list1, list2):\n if len(list1) != len(list2):\n return False\n for elem1, elem2 in zip(list1, list2):\n if elem1 != elem2:\n return False\n return True", "def number_of_valid_pass_puzzle_2(input_list: list):\n num_of_valid = 0\n for item in input_list:\n data = split_data(item)\n if check_for_validity_puzzle_2(*data):\n num_of_valid += 1\n return num_of_valid", "def checkLines(list, max_length):\n problems = []\n for i in range(len(list)):\n lineLength = len(list[i])\n if lineLength > max_length:\n problems.append((i, lineLength))\n return problems", "def unique_list(var):\n return len([x for x in set(var)]) == len(var)", "def validate_outcome_length(outcomes):\n # Make sure each outcome is a container.\n try:\n lengths = list(map(len, outcomes))\n except TypeError:\n raise ditException('One or more outcomes is not a container.')\n else:\n outcome_length = lengths[0]\n\n # Make sure each outcome has the same length.\n equal_lengths = np.alltrue(np.equal(lengths, outcome_length))\n if not equal_lengths:\n raise ditException('Not all outcomes have the same length.')\n else:\n return True", "def number_of_valid_pass_puzzle_1(input_list: list):\n num_of_valid = 0\n for item in input_list:\n data = split_data(item)\n if check_for_validity_puzzle_1(*data):\n num_of_valid += 1\n return num_of_valid" ]
[ "0.6904997", "0.68040586", "0.680279", "0.67442155", "0.6725389", "0.6724341", "0.671778", "0.66840255", "0.6568775", "0.65605485", "0.65421414", "0.6539833", "0.6526015", "0.6373606", "0.63600415", "0.6360039", "0.63565075", "0.63232565", "0.6305891", "0.630208", "0.6297076", "0.62825656", "0.6249381", "0.62492424", "0.62308043", "0.62280005", "0.62141013", "0.61953634", "0.61846185", "0.61754847" ]
0.8220756
0