Python
def released_token(chain, team_multisig, token, release_agent, customer) -> Contract:
    """Create a Crowdsale token where transfer restrictions have been lifted."""

    token.functions.setReleaseAgent(release_agent.address).transact({"from": team_multisig})
    release_agent.functions.release().transact({"from": team_multisig})

    # Make sure customer 1 has some token balance
    token.functions.transfer(customer, 10000).transact({"from": team_multisig})

    return token
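These snippets read like pytest fixtures with their decorators stripped. As a rough illustration of how a fixture such as released_token would typically be consumed, here is a minimal sketch of a test; the test name and the customer/customer_2 fixtures are assumptions, not part of the original source.

Python
# Hypothetical test consuming the released_token fixture above; pytest injects
# fixtures by parameter name, so no explicit wiring is needed.
def test_transfer_after_release(released_token, customer, customer_2):
    # The fixture already funded `customer`, so a plain transfer should succeed
    released_token.functions.transfer(customer_2, 100).transact({"from": customer})
    assert released_token.functions.balanceOf(customer_2).call() == 100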
Python
def empty_token(chain, team_multisig, token_name, token_symbol) -> Contract:
    """Create the token contract without initial supply."""
    args = [token_name, token_symbol, 0, 0, True]  # Owner set

    tx = {
        "from": team_multisig
    }

    contract, hash = chain.provider.deploy_contract('CrowdsaleToken', deploy_args=args, deploy_transaction=tx)
    return contract
Python
def tranche_pricing(chain, proxy_buyer, team_multisig):
    """ETH tranche pricing for testing presale counters."""
    args = [
        [
            to_wei("0", "ether"), to_wei("0.00666666", "ether"),
            to_wei("10001", "ether"), to_wei("0.00714285", "ether"),
            to_wei("30001", "ether"), to_wei("0.00769230", "ether"),
            to_wei("50001", "ether"), to_wei("0.00833333", "ether"),
            to_wei("75001", "ether"), to_wei("0.00909090", "ether"),
            to_wei("100001", "ether"), to_wei("0.01000000", "ether"),
            to_wei("1000000000", "ether"), to_wei("0.01000000", "ether"),
            to_wei("1000000000000", "ether"), to_wei("0.00", "ether")
        ],
    ]

    tx = {
        "from": team_multisig
    }
    contract, hash = chain.provider.deploy_contract('EthTranchePricing', deploy_args=args, deploy_transaction=tx)

    contract.transact({"from": team_multisig}).setPreicoAddress(proxy_buyer.address, to_wei("0.05", "ether"))
    return contract
Python
def sign(data: bytes, private_key_seed_ascii: str, hash_function=sha256_msg):
    """Sign data using an Ethereum private key.

    :param private_key_seed_ascii: Private key seed as ASCII string
    """

    priv_key = PrivateKey(Web3.sha3(text=private_key_seed_ascii))
    msghash = hash_function(data)
    signature = priv_key.sign_msg_hash(msghash)
    v, r, s = signature.vrs

    # Assuming chainID is 1, i.e. the mainnet.
    # TODO: take in chainID as a param, so that v is set appropriately;
    # currently there's no good way to determine chainID.
    v = to_eth_v(v)

    r_bytes = to_bytes(r)
    s_bytes = to_bytes(s)

    # Make sure we use bytes data and zero padding stays
    # good across different systems
    r_hex = binascii.hexlify(r_bytes).decode("ascii")
    s_hex = binascii.hexlify(s_bytes).decode("ascii")

    # Convert to Ethereum address format
    pub_key = priv_key.public_key
    addr = pub_key.to_checksum_address()
    pub = pub_key.to_bytes()

    # Return various bits about signing so it's easier to debug
    return {
        "signature": signature,
        "v": v,
        "r": r,
        "s": s,
        "r_bytes": r_bytes,
        "s_bytes": s_bytes,
        "r_hex": "0x" + r_hex,
        "s_hex": "0x" + s_hex,
        "address_bitcoin": addr,
        "address_ethereum": get_ethereum_address_from_private_key(private_key_seed_ascii),
        "public_key": pub,
        "hash": msghash,
        "payload": binascii.hexlify(bytes([v] + list(r_bytes) + list(s_bytes)))
    }
Python
def verify(msghash: bytes, signature, public_key):
    """Verify that data has been signed with an Ethereum private key.

    :param signature: Raw signature bytes
    :return: True if the signature matches the public key
    """
    key_api = KeyAPI('eth_keys.backends.NativeECCBackend')
    return key_api.ecdsa_verify(msghash, Signature(signature), PublicKey(public_key))
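A minimal round-trip sketch of how sign() and verify() above fit together, assuming the eth_keys Signature object in the returned dict exposes to_bytes() (as in recent eth_keys releases); the payload and key seed strings are arbitrary test values.

Python
# Hedged usage sketch: sign a payload with a throwaway key seed, then verify
# the 65-byte r||s||v signature against the recovered public key.
signed = sign(b"example payload", "test key seed")
assert verify(
    signed["hash"],                  # 32-byte message hash
    signed["signature"].to_bytes(),  # raw signature bytes for Signature()
    signed["public_key"],            # 64-byte uncompressed public key
)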
Python
def milestone_ico(chain, team_multisig, start_time, milestone_pricing, preico_cap, preico_funding_goal, token, presale_fund_collector, end_time) -> Contract:
    """Create a crowdsale contract that uses milestone based pricing."""

    args = [
        token.address,
        milestone_pricing.address,
        team_multisig,
        start_time,
        end_time,
        0,
    ]

    tx = {
        "from": team_multisig,
    }

    contract, hash = chain.provider.deploy_contract('UncappedCrowdsale', deploy_args=args, deploy_transaction=tx)

    assert contract.functions.owner().call() == team_multisig
    assert not token.functions.released().call()

    # Allow crowdsale contract to do mint()
    token.functions.setMintAgent(contract.address, True).transact({"from": team_multisig})
    assert token.functions.mintAgents(contract.address).call() == True

    return contract
Python
def aml_reclaim_setup(aml_token: Contract, team_multisig: str, customer: str, customer_2):
    """Set up some token balances on accounts for the reclaim test."""
    aml_token.functions.setTransferAgent(team_multisig, True).transact({"from": team_multisig})
    aml_token.functions.transfer(customer, 1000000).transact({"from": team_multisig})
    aml_token.functions.transfer(customer_2, 2000000).transact({"from": team_multisig})
Python
def csv_stream(aml_reclaim_setup, customer):
    """Set up a CSV file for reclaim."""
    source = CSV_SOURCE.format(customer)
    return StringIO(source)
Python
def vault(chain, team_multisig, token, unlock_time):
    """Deploy a loaded vault contract and move all tokens there."""

    args = [
        team_multisig,
        token.address,
        unlock_time
    ]
    contract, hash = chain.provider.deploy_contract('TimeVault', deploy_args=args)

    # Load all tokens to the vault
    token.functions.transfer(contract.address, 1000000).transact({"from": team_multisig})
    return contract
Python
def crowdsale(chain, team_multisig, start_time, end_time, pricing_strategy, preico_cap, minimum_funding_goal, cap, token) -> Contract:
    """Create a crowdsale contract that has a minting cap, bonus % and token sold limit."""

    args = [
        token.address,
        pricing_strategy.address,
        team_multisig,
        start_time,
        end_time,
        minimum_funding_goal,
        cap
    ]

    tx = {
        "from": team_multisig,
    }

    contract, hash = chain.provider.deploy_contract('MintedEthCappedCrowdsale', deploy_args=args, deploy_transaction=tx)

    assert contract.functions.owner().call() == team_multisig
    assert not token.functions.released().call()
    assert contract.call().weiCap() == cap

    # Allow crowdsale contract to do mint()
    token.functions.setMintAgent(contract.address, True).transact({"from": team_multisig})
    assert token.functions.mintAgents(contract.address).call() == True

    return contract
Python
def participate_early(chain, web3: Web3, presale_address: str, crowdsale_address: str, deploy_address: str, start=0, end=32, timeout=300) -> int:
    """Move funds over early.

    .. note ::

        The crowdsale contract checks the participation whitelist by investor address, not by msg.sender.

        This process gives presale investors the ability to participate in the crowdsale early,
        bypassing the retail investor start time. However, they could also top up their existing
        pre-ICO accounts, so this is largely a non-issue.

    :param start: Move only n investors (for testing purposes)

    :param end: Move only n investors (for testing purposes)
    """

    updated = 0

    PresaleFundCollector = get_contract_by_name(chain, "PresaleFundCollector")
    presale = PresaleFundCollector(address=presale_address)

    Crowdsale = get_contract_by_name(chain, "Crowdsale")
    crowdsale = Crowdsale(address=crowdsale_address)

    # Make sure presale is correctly set
    txid = presale.transact({"from": deploy_address}).setCrowdsale(crowdsale.address)
    logger.info("Setting presale crowdsale address to %s on txid %s", crowdsale.address, txid)
    check_succesful_tx(web3, txid, timeout=timeout)

    # Double check presale has a presale price set
    MilestonePricing = get_contract_by_name(chain, "MilestonePricing")
    pricing_strategy = MilestonePricing(address=crowdsale.functions.pricingStrategy().call())
    if not pricing_strategy.functions.preicoAddresses(presale.address).call():
        raise RuntimeError("Was not listed as presale address for pricing: {}".format(presale.address))

    for i in range(start, min(end, presale.functions.investorCount().call())):

        investor = presale.functions.investors(i).call()

        if presale.call().balances(investor) > 0:

            print("Whitelisting for {} to crowdsale {}".format(investor, crowdsale.address))
            txid = crowdsale.functions.setEarlyParicipantWhitelist(investor, True).transact({"from": deploy_address})
            print("Broadcasting whitelist transaction {}".format(txid))
            check_succesful_tx(web3, txid, timeout=timeout)

            funds = from_wei(presale.functions.balances(investor).call(), "ether")
            print("Moving funds {} ETH for investor {} to presale {}".format(funds, investor, presale.address))
            txid = presale.functions.participateCrowdsaleInvestor(investor).transact({"from": deploy_address})
            print("Broadcasting transaction {}".format(txid))
            check_succesful_tx(web3, txid, timeout=timeout)
            updated += 1
        else:
            print("Investor already handled: {}".format(investor))

    return updated
Python
def basic_kyc(chain, team_multisig) -> Contract:
    """Create the transaction verifier contract."""

    tx = {
        "from": team_multisig
    }
    contract, hash_ = chain.provider.deploy_contract('BasicKYC', deploy_transaction=tx)

    check_gas(chain, hash_)

    return contract
Python
def mock_security_transfer_agent(chain, team_multisig) -> Contract:
    """Create the transaction verifier contract."""

    tx = {
        "from": team_multisig
    }
    contract, hash_ = chain.provider.deploy_contract('MockSecurityTransferAgent', deploy_transaction=tx)

    check_gas(chain, hash_)

    return contract
Python
def restricted_transfer_agent(chain, team_multisig, basic_kyc) -> Contract:
    """Create the transaction verifier contract."""

    args = [basic_kyc.address]

    tx = {
        "from": team_multisig
    }
    contract, hash_ = chain.provider.deploy_contract('RestrictedTransferAgent', deploy_args=args, deploy_transaction=tx)

    check_gas(chain, basic_kyc.transact(tx).adminAddRole(team_multisig, "setter"))
    check_gas(chain, hash_)

    return contract
Python
def advanced_transfer_agent(chain, team_multisig, basic_kyc) -> Contract:
    """Create the transaction verifier contract."""

    args = [basic_kyc.address]

    tx = {
        "from": team_multisig
    }
    contract, hash_ = chain.provider.deploy_contract('AdvancedTransferAgent', deploy_args=args, deploy_transaction=tx)

    check_gas(chain, hash_)

    return contract
Python
def announcement(chain, team_multisig, announcement_name, announcement_uri, announcement_type, announcement_hash) -> Contract:
    """Create a bogus announcement for testing"""

    args = [to_bytes(text=announcement_name), to_bytes(text=announcement_uri), announcement_type, announcement_hash]

    tx = {
        "from": team_multisig
    }
    contract, hash_ = chain.provider.deploy_contract('BogusAnnouncement', deploy_args=args, deploy_transaction=tx)
    check_gas(chain, hash_)

    assert removeNonPrintable(contract.call().announcementName()) == announcement_name
    assert removeNonPrintable(contract.call().announcementURI()) == announcement_uri
    assert contract.call().announcementType() == announcement_type

    return contract
Python
def receiver(chain, team_multisig) -> Contract:
    """Create the receiver contract for callback testing."""

    tx = {
        "from": team_multisig
    }
    contract, hash_ = chain.provider.deploy_contract('MockERC677Receiver', deploy_transaction=tx)
    return contract
Python
def failsafetester(chain, team_multisig) -> Contract:
    """Create a contract for testing the failsafe."""

    tx = {
        "from": team_multisig
    }
    contract, hash_ = chain.provider.deploy_contract('TestCheckpointFailsafe', deploy_transaction=tx)
    return contract
Python
def security_token_verifier(chain, team_multisig) -> Contract:
    """Create the transaction verifier contract."""

    tx = {
        "from": team_multisig
    }
    contract, hash_ = chain.provider.deploy_contract('MockSecurityTransferAgent', deploy_transaction=tx)

    check_gas(chain, hash_)

    return contract
Python
def mock_kyc(chain, team_multisig, customer) -> Contract:
    """Create the Mock KYC contract."""

    tx = {
        "from": team_multisig
    }
    contract, hash_ = chain.provider.deploy_contract('BasicKYC', deploy_transaction=tx)

    check_gas(chain, hash_)

    check_gas(chain, contract.transact(tx).adminAddRole(team_multisig, "setter"))
    check_gas(chain, contract.transact(tx).setAttributes(customer, 1))
    check_gas(chain, contract.transact(tx).setAttributes(team_multisig, 1))

    return contract
Python
def restricted_transfer_agent(chain, team_multisig, mock_kyc) -> Contract:
    """Create the transaction verifier contract."""

    args = [mock_kyc.address]

    tx = {
        "from": team_multisig
    }
    contract, hash_ = chain.provider.deploy_contract('RestrictedTransferAgent', deploy_args=args, deploy_transaction=tx)

    check_gas(chain, hash_)

    return contract
Python
def new_token(chain, team_multisig, token_name, token_symbol) -> Contract:
    """Get another token contract with 0 initial issuance."""
    args = [token_name, token_symbol, 0, 0, True]  # Owner set

    tx = {
        "from": team_multisig
    }

    contract, hash = chain.provider.deploy_contract('CrowdsaleToken', deploy_args=args, deploy_transaction=tx)
    return contract
Python
def success_sample_data(customer, customer_2):
    """Enough to break the minimum funding goal"""
    data = """
Address,Payment at,Tx hash,Tx index,Invested ETH,Received tokens
{},2017-04-13T16:01:46+00:00,0xf88780859cfde239e5898d036b685f5358d4b0a0f82e8cce26403c782f8a2e52,1,0.505,561
{},2017-04-13T16:02:38+00:00,0x1385320b9d693afad1dce05cb0f9c8c3c1bc017668d32ee2b69d4039fdaf5983,3,0.1,111
{},2017-04-13T16:02:38+00:00,0x1385320b9d693afad1dce05cb0f9c8c3c1bc017668d32ee2b69d4039fdaf5984,4,7500,1111
""".strip().format(customer, customer_2, customer_2)
    return list(csv.DictReader(StringIO(data)))
Python
def relaunched_crowdsale(chain, team_multisig, start_time, end_time, milestone_pricing, preico_cap, minimum_funding_goal, cap, original_crowdsale, new_token, founder_allocation) -> Contract:
    """Create a crowdsale with fixed contracts."""

    args = [
        new_token.address,
        milestone_pricing.address,
        team_multisig,
        start_time,
        end_time,
        minimum_funding_goal,
        cap
    ]

    tx = {
        "from": team_multisig,
    }

    contract, hash = chain.provider.deploy_contract('RelaunchedCrowdsale', deploy_args=args, deploy_transaction=tx)

    assert contract.functions.owner().call() == team_multisig
    assert not new_token.functions.released().call()
    assert contract.functions.maximumSellableTokens().call() == cap

    # Allow crowdsale contract to do mint()
    new_token.functions.setMintAgent(contract.address, True).transact({"from": team_multisig})
    assert new_token.functions.mintAgents(contract.address).call() == True

    set_finalizer(chain, new_token, contract, team_multisig, founder_allocation)

    return contract
Python
def kyc_crowdsale(chain, team_multisig, preico_starts_at, preico_ends_at, pricing, preico_cap, preico_funding_goal, preico_token_allocation, kyc_token, signer_address, default_finalize_agent, initial_supply) -> Contract:
    """Create a Pre-ICO crowdsale contract."""

    token = kyc_token

    args = [
        token.address,
        pricing.address,
        team_multisig,
        preico_starts_at,
        preico_ends_at,
        preico_funding_goal,
        team_multisig
    ]

    tx = {
        "from": team_multisig,
    }

    contract, hash = chain.provider.deploy_contract('KYCCrowdsale', deploy_args=args, deploy_transaction=tx)

    args = [
        contract.address
    ]

    finalizer_contract, hash = chain.provider.deploy_contract('NullFinalizeAgent', deploy_args=args)
    contract.functions.setFinalizeAgent(finalizer_contract.address).transact({"from": team_multisig})

    assert contract.functions.owner().call() == team_multisig
    assert not token.functions.released().call()

    # Allow the token sale contract to distribute unreleased tokens
    # token.transact({"from": team_multisig}).setTransferAgent(contract.address, True)
    token.functions.setTransferAgent(team_multisig, True).transact({"from": team_multisig})
    token.functions.approve(contract.address, initial_supply).transact({"from": team_multisig})
    token.functions.setReleaseAgent(team_multisig).transact({"from": team_multisig})
    contract.functions.setSignerAddress(signer_address).transact({"from": team_multisig})
    return contract
Python
def token_10000(chain, team_multisig):
    """Unlocked token with mint of 10,000 ethereum decimal units"""

    args = [
        team_multisig,
        "Token",
        "TKN",
        10000 * 10**18,
        0,
        chain.web3.eth.getBlock('pending').timestamp
    ]

    contract, hash = chain.provider.deploy_contract('CentrallyIssuedToken', deploy_args=args)

    assert contract.functions.balanceOf(team_multisig).call() == 10000 * 10**18

    contract.functions.releaseTokenTransfer().transact({"from": team_multisig})
    return contract
Python
def token_vault_tapped(chain, team_multisig, token, freeze_ends_at) -> Contract:
    """Another token vault deployment with a single customer."""

    total = 3000

    args = [
        team_multisig,
        freeze_ends_at,
        token.address,
        total,
    ]
    contract, hash = chain.provider.deploy_contract('TokenVault', deploy_args=args)
    return contract
Python
def loaded_token_vault(token_vault, team_multisig, token_vault_balances):
    """Token vault with investor balances set."""
    for address, balance in token_vault_balances:
        token_vault.functions.setInvestor(address, balance, 0).transact({"from": team_multisig})
    return token_vault
Python
async def exchange_peer_info(endpoint, msg_tag, ctrl_tag, listener):
    """Helper function that exchanges endpoint information"""

    # Pack peer information incl. a checksum
    fmt = "QQQ"
    my_info = struct.pack(fmt, msg_tag, ctrl_tag, hash64bits(msg_tag, ctrl_tag))
    peer_info = bytearray(len(my_info))
    my_info_arr = Array(my_info)
    peer_info_arr = Array(peer_info)

    # Send/recv peer information. Notice, we force an `await` between the two
    # streaming calls (see <https://github.com/rapidsai/ucx-py/pull/509>)
    if listener is True:
        await comm.stream_send(endpoint, my_info_arr, my_info_arr.nbytes)
        await comm.stream_recv(endpoint, peer_info_arr, peer_info_arr.nbytes)
    else:
        await comm.stream_recv(endpoint, peer_info_arr, peer_info_arr.nbytes)
        await comm.stream_send(endpoint, my_info_arr, my_info_arr.nbytes)

    # Unpacking and sanity check of the peer information
    ret = {}
    (ret["msg_tag"], ret["ctrl_tag"], ret["checksum"]) = struct.unpack(fmt, peer_info)

    expected_checksum = hash64bits(ret["msg_tag"], ret["ctrl_tag"])

    if expected_checksum != ret["checksum"]:
        raise RuntimeError(
            f'Checksum invalid! {hex(expected_checksum)} != {hex(ret["checksum"])}'
        )

    return ret
Python
def handle_ctrl_msg(ep_weakref, log, msg, future):
    """Function that is called when receiving the control message"""
    try:
        future.result()
    except UCXCanceled:
        return  # The ctrl signal was canceled
    logger.debug(log)

    ep = ep_weakref()
    if ep is None or ep.closed():
        if ep is not None:
            ep.abort()
        return  # The endpoint is closed

    opcode, close_after_n_recv = CtrlMsg.deserialize(msg)
    if opcode == 1:
        ep.close_after_n_recv(close_after_n_recv, count_from_ep_creation=True)
    else:
        raise UCXError("Received unknown control opcode: %s" % opcode)
Python
def create_listener(
    self,
    callback_func,
    port=0,
    endpoint_error_handling=None,
):
    """Create and start a listener to accept incoming connections

    callback_func is the function or coroutine that takes one
    argument -- the Endpoint connected to the client.

    Notice, listening stops when the returned Listener goes out of
    scope, so remember to keep a reference to the object.

    Parameters
    ----------
    callback_func: function or coroutine
        A callback function that gets invoked when an incoming
        connection is accepted
    port: int, optional
        An unused port number for listening, or `0` to let UCX assign
        an unused port.
    endpoint_error_handling: None or boolean, optional
        Enable endpoint error handling raising exceptions when an error
        occurs, may incur performance penalties but prevents a process
        from terminating unexpectedly, which may happen when disabled.
        None (default) will enable endpoint error handling based on the
        UCX version, enabling it for UCX >= 1.11.0 and disabling it for
        any versions prior to that. This is done to prevent CUDA IPC
        from being quietly disabled due to lack of support in older UCX
        versions. Explicitly specifying True/False will override the
        default.

    Returns
    -------
    Listener
        The new listener. When this object is deleted, the listening stops
    """
    self.continuous_ucx_progress()
    if port is None:
        port = 0

    if endpoint_error_handling is None:
        endpoint_error_handling = get_ucx_version() >= (1, 11, 0)

    logger.info("create_listener() - Start listening on port %d" % port)
    ret = Listener(
        ucx_api.UCXListener(
            worker=self.worker,
            port=port,
            cb_func=_listener_handler,
            cb_args=(callback_func, self, endpoint_error_handling),
        )
    )
    return ret
Python
async def create_endpoint(self, ip_address, port, endpoint_error_handling=None):
    """Create a new endpoint to a server

    Parameters
    ----------
    ip_address: str
        IP address of the server the endpoint should connect to
    port: int
        Port of the server the endpoint should connect to
    endpoint_error_handling: None or boolean, optional
        Enable endpoint error handling raising exceptions when an error
        occurs, may incur performance penalties but prevents a process
        from terminating unexpectedly, which may happen when disabled.
        None (default) will enable endpoint error handling based on the
        UCX version, enabling it for UCX >= 1.11.0 and disabling it for
        any versions prior to that. This is done to prevent CUDA IPC
        from being quietly disabled due to lack of support in older UCX
        versions. Explicitly specifying True/False will override the
        default.

    Returns
    -------
    Endpoint
        The new endpoint
    """
    self.continuous_ucx_progress()

    if endpoint_error_handling is None:
        endpoint_error_handling = get_ucx_version() >= (1, 11, 0)

    ucx_ep = ucx_api.UCXEndpoint.create(
        self.worker, ip_address, port, endpoint_error_handling
    )
    self.worker.progress()

    # We create the Endpoint in three steps:
    #  1) Generate unique IDs to use as tags
    #  2) Exchange endpoint info such as tags
    #  3) Use the info to create an endpoint
    seed = os.urandom(16)
    msg_tag = hash64bits("msg_tag", seed, ucx_ep.handle)
    ctrl_tag = hash64bits("ctrl_tag", seed, ucx_ep.handle)
    peer_info = await exchange_peer_info(
        endpoint=ucx_ep,
        msg_tag=msg_tag,
        ctrl_tag=ctrl_tag,
        listener=False,
    )
    tags = {
        "msg_send": peer_info["msg_tag"],
        "msg_recv": msg_tag,
        "ctrl_send": peer_info["ctrl_tag"],
        "ctrl_recv": ctrl_tag,
    }
    ep = Endpoint(endpoint=ucx_ep, ctx=self, tags=tags)

    logger.debug(
        "create_endpoint() client: %s, error handling: %s, msg-tag-send: %s, "
        "msg-tag-recv: %s, ctrl-tag-send: %s, ctrl-tag-recv: %s"
        % (
            hex(ep._ep.handle),
            endpoint_error_handling,
            hex(ep._tags["msg_send"]),
            hex(ep._tags["msg_recv"]),
            hex(ep._tags["ctrl_send"]),
            hex(ep._tags["ctrl_recv"]),
        )
    )

    # Setup the control receive
    CtrlMsg.setup_ctrl_recv(ep)
    return ep
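As a rough end-to-end sketch of how the listener and endpoint machinery above is driven from user code, here is a small echo round trip using the public ucp wrappers (which delegate to these ApplicationContext methods); the port number is an arbitrary assumption.

Python
# Hedged usage sketch: create a listener and an endpoint in the same process,
# send four bytes, and receive the echoed reply.
import asyncio

import numpy as np
import ucp


async def main():
    async def echo_handler(ep):
        buf = np.empty(4, dtype="u1")
        await ep.recv(buf)   # receive 4 bytes from the client
        await ep.send(buf)   # echo them back
        await ep.close()

    listener = ucp.create_listener(echo_handler, port=13337)

    ep = await ucp.create_endpoint(ucp.get_address(), 13337)
    msg = np.arange(4, dtype="u1")
    reply = np.empty_like(msg)
    await ep.send(msg)
    await ep.recv(reply)
    assert (msg == reply).all()

    await ep.close()
    listener.close()


asyncio.run(main())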
Python
async def create_endpoint_from_worker_address(
    self, address, endpoint_error_handling=None
):
    """Create a new endpoint to a server

    Parameters
    ----------
    address: UCXAddress
        The address of the remote worker the endpoint should connect to
    endpoint_error_handling: None or boolean, optional
        Enable endpoint error handling raising exceptions when an error
        occurs, may incur performance penalties but prevents a process
        from terminating unexpectedly, which may happen when disabled.
        None (default) will enable endpoint error handling based on the
        UCX version, enabling it for UCX >= 1.11.0 and disabling it for
        any versions prior to that. This is done to prevent CUDA IPC
        from being quietly disabled due to lack of support in older UCX
        versions. Explicitly specifying True/False will override the
        default.

    Returns
    -------
    Endpoint
        The new endpoint
    """
    self.continuous_ucx_progress()

    if endpoint_error_handling is None:
        endpoint_error_handling = get_ucx_version() >= (1, 11, 0)

    ucx_ep = ucx_api.UCXEndpoint.create_from_worker_address(
        self.worker,
        address,
        endpoint_error_handling,
    )
    self.worker.progress()

    ep = Endpoint(endpoint=ucx_ep, ctx=self, tags=None)

    logger.debug(
        "create_endpoint() client: %s, error handling: %s"
        % (hex(ep._ep.handle), endpoint_error_handling)
    )

    return ep
Python
def register_am_allocator(self, allocator, allocator_type):
    """Register an allocator for received Active Messages.

    The allocator registered by this function is always called by the
    active message receive callback when an incoming message is
    available. The appropriate allocator is called depending on whether
    the message received is a host message or CUDA message.
    Note that CUDA messages can only be received via rendezvous, all
    eager messages are received on a host object.

    By default, the host allocator is `bytearray`. There is no default
    CUDA allocator and one must always be registered if CUDA is used.

    Parameters
    ----------
    allocator: callable
        An allocation function accepting exactly one argument, the
        size of the message to receive.
    allocator_type: str
        The type of allocator, currently supports "host" and "cuda".
    """
    if allocator_type == "host":
        allocator_type = ucx_api.AllocatorType.HOST
    elif allocator_type == "cuda":
        allocator_type = ucx_api.AllocatorType.CUDA
    else:
        allocator_type = ucx_api.AllocatorType.UNSUPPORTED

    self.worker.register_am_allocator(allocator, allocator_type)
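For the CUDA path mentioned in the docstring, an allocator has to be registered explicitly. Below is a short sketch using the module-level ucp.register_am_allocator wrapper; rmm.DeviceBuffer is only one possible CUDA allocator and the rmm package is an optional dependency assumed here.

Python
import ucp

# Host Active Messages: bytearray is already the default, shown only for symmetry.
ucp.register_am_allocator(lambda nbytes: bytearray(nbytes), "host")

# CUDA Active Messages have no default allocator; rmm.DeviceBuffer is one option
# (requires the optional rmm package).
import rmm

ucp.register_am_allocator(lambda nbytes: rmm.DeviceBuffer(size=nbytes), "cuda")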
Python
async def recv(self, buffer, tag):
    """Receive directly on worker without a local Endpoint into `buffer`.

    Parameters
    ----------
    buffer: exposing the buffer protocol or array/cuda interface
        The buffer to receive into. Raise ValueError if buffer
        is smaller than nbytes or read-only.
    tag: hashable, optional
        Set a tag that must match the received message.
    """
    if not isinstance(buffer, Array):
        buffer = Array(buffer)
    nbytes = buffer.nbytes
    log = "[Worker Recv] worker: %s, tag: %s, nbytes: %d, type: %s" % (
        hex(self.worker.handle),
        hex(tag),
        nbytes,
        type(buffer.obj),
    )
    logger.debug(log)
    return await comm.tag_recv(self.worker, buffer, nbytes, tag, name=log)
Python
async def close(self):
    """Close the endpoint cleanly.
    This will attempt to flush outgoing buffers before actually
    closing the underlying UCX endpoint.
    """
    if self.closed():
        self.abort()
        return
    try:
        # Making sure we only tell peer to shutdown once
        if self._shutting_down_peer:
            return
        self._shutting_down_peer = True

        # Send a shutdown message to the peer
        msg = CtrlMsg.serialize(opcode=1, close_after_n_recv=self._send_count)
        msg_arr = Array(msg)
        log = "[Send shutdown] ep: %s, tag: %s, close_after_n_recv: %d" % (
            hex(self.uid),
            hex(self._tags["ctrl_send"]),
            self._send_count,
        )
        logger.debug(log)
        try:
            await comm.tag_send(
                self._ep, msg_arr, msg_arr.nbytes, self._tags["ctrl_send"], name=log
            )
        # The peer might already be shutting down thus we can ignore any send errors
        except UCXError as e:
            logging.warning(
                "UCX failed closing worker %s (probably already closed): %s"
                % (hex(self.uid), repr(e))
            )
    finally:
        if not self.closed():
            # Give all current outstanding send() calls a chance to return
            self._ctx.worker.progress()
            await asyncio.sleep(0)
            self.abort()
Python
async def send(self, buffer, tag=None, force_tag=False):
    """Send `buffer` to connected peer.

    Parameters
    ----------
    buffer: exposing the buffer protocol or array/cuda interface
        The buffer to send. Raise ValueError if buffer is smaller
        than nbytes.
    tag: hashable, optional
        Set a tag that the receiver must match. Currently the tag
        is hashed together with the internal Endpoint tag that is
        agreed with the remote end at connection time. To enforce
        using the user tag, make sure to specify `force_tag=True`.
    force_tag: bool
        If true, force using `tag` as is, otherwise the value
        specified with `tag` (if any) will be hashed with the
        internal Endpoint tag.
    """
    self._ep.raise_on_error()
    if self.closed():
        raise UCXCloseError("Endpoint closed")
    if not isinstance(buffer, Array):
        buffer = Array(buffer)
    if tag is None:
        tag = self._tags["msg_send"]
    elif not force_tag:
        tag = hash64bits(self._tags["msg_send"], hash(tag))
    nbytes = buffer.nbytes
    log = "[Send #%03d] ep: %s, tag: %s, nbytes: %d, type: %s" % (
        self._send_count,
        hex(self.uid),
        hex(tag),
        nbytes,
        type(buffer.obj),
    )
    logger.debug(log)
    self._send_count += 1

    try:
        return await comm.tag_send(self._ep, buffer, nbytes, tag, name=log)
    except UCXCanceled as e:
        # If self._ep has already been closed and destroyed, we reraise the
        # UCXCanceled exception.
        if self._ep is None:
            raise e
Python
async def am_send(self, buffer):
    """Send `buffer` to connected peer.

    Parameters
    ----------
    buffer: exposing the buffer protocol or array/cuda interface
        The buffer to send. Raise ValueError if buffer is smaller
        than nbytes.
    """
    if self.closed():
        raise UCXCloseError("Endpoint closed")
    if not isinstance(buffer, Array):
        buffer = Array(buffer)
    nbytes = buffer.nbytes
    log = "[AM Send #%03d] ep: %s, tag: %s, nbytes: %d, type: %s" % (
        self._send_count,
        hex(self.uid),
        hex(self._tags["msg_send"]),
        nbytes,
        type(buffer.obj),
    )
    logger.debug(log)
    self._send_count += 1
    return await comm.am_send(self._ep, buffer, nbytes, name=log)
Python
async def recv(self, buffer, tag=None, force_tag=False):
    """Receive from connected peer into `buffer`.

    Parameters
    ----------
    buffer: exposing the buffer protocol or array/cuda interface
        The buffer to receive into. Raise ValueError if buffer
        is smaller than nbytes or read-only.
    tag: hashable, optional
        Set a tag that must match the received message. Currently
        the tag is hashed together with the internal Endpoint tag
        that is agreed with the remote end at connection time.
        To enforce using the user tag, make sure to specify
        `force_tag=True`.
    force_tag: bool
        If true, force using `tag` as is, otherwise the value
        specified with `tag` (if any) will be hashed with the
        internal Endpoint tag.
    """
    if tag is None:
        tag = self._tags["msg_recv"]
    elif not force_tag:
        tag = hash64bits(self._tags["msg_recv"], hash(tag))

    if not self._ctx.worker.tag_probe(tag):
        self._ep.raise_on_error()
        if self.closed():
            raise UCXCloseError("Endpoint closed")

    if not isinstance(buffer, Array):
        buffer = Array(buffer)
    nbytes = buffer.nbytes
    log = "[Recv #%03d] ep: %s, tag: %s, nbytes: %d, type: %s" % (
        self._recv_count,
        hex(self.uid),
        hex(tag),
        nbytes,
        type(buffer.obj),
    )
    logger.debug(log)
    self._recv_count += 1

    try:
        ret = await comm.tag_recv(self._ep, buffer, nbytes, tag, name=log)
    except UCXCanceled as e:
        # If self._ep has already been closed and destroyed, we reraise the
        # UCXCanceled exception.
        if self._ep is None:
            raise e

    self._finished_recv_count += 1
    if (
        self._close_after_n_recv is not None
        and self._finished_recv_count >= self._close_after_n_recv
    ):
        self.abort()
    return ret
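A minimal sketch of how the tag-matched `send`/`recv` pair above is typically driven from user code. It assumes the top-level `ucp` module (`create_listener`, `create_endpoint`, `get_address`) from UCX-Py; the port number and message size are arbitrary values chosen for illustration.

import asyncio
import numpy as np
import ucp  # assumes ucx-py is installed

PORT = 13337   # arbitrary port for the example
N = 1 << 20    # 1 MiB message


async def main():
    async def server_handler(ep):
        buf = np.empty(N, dtype="u1")
        # the user tag is hashed with the internal endpoint tag unless force_tag=True
        await ep.recv(buf, tag="payload")
        await ep.send(buf, tag="payload")  # echo the message back
        await ep.close()

    listener = ucp.create_listener(server_handler, PORT)
    ep = await ucp.create_endpoint(ucp.get_address(), PORT)

    msg = np.arange(N, dtype="u1")
    resp = np.empty_like(msg)
    await ep.send(msg, tag="payload")
    await ep.recv(resp, tag="payload")
    assert (msg == resp).all()

    await ep.close()
    listener.close()


if __name__ == "__main__":
    asyncio.run(main())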
Python
async def recv_obj(self, tag=None, allocator=bytearray):
    """Receive from connected peer that calls `send_obj()`.

    As opposed to `recv()`, this function returns the received object.
    Data is received into a buffer allocated by `allocator`.

    The transfer includes an extra message containing the size of `obj`,
    which increases the overhead slightly.

    Parameters
    ----------
    tag: hashable, optional
        Set a tag that must match the received message. Notice, currently
        UCX-Py doesn't support an "any tag" thus `tag=None` only matches a
        send that also sets `tag=None`.
    allocator: callable, optional
        Function to allocate the received object. The function should
        take the number of bytes to allocate as input and return a new
        buffer of that size as output.

    Example
    -------
    >>> pickle.loads(await ep.recv_obj())
    """
    nbytes = array.array("Q", [0])
    await self.recv(nbytes, tag=tag)
    nbytes = nbytes[0]
    ret = allocator(nbytes)
    await self.recv(ret, tag=tag)
    return ret
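For reference, a hedged sketch of the size-prefixed round trip described in the docstring. It assumes two already-connected `Endpoint` objects and that `send_obj` is available on the sending side, as the docstring implies.

import pickle


async def exchange(sender_ep, receiver_ep):
    """Round trip a picklable object using the size-prefixed protocol above."""
    payload = {"epoch": 3, "loss": 0.17}
    # send_obj transmits the size first; recv_obj allocates with `allocator`
    await sender_ep.send_obj(pickle.dumps(payload))
    received = pickle.loads(await receiver_ep.recv_obj())
    assert received == payload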
Python
async def flush():
    """Flushes outstanding AMO and RMA operations. This ensures that the
    operations issued on this worker have completed both locally and remotely.
    This function does not guarantee ordering.
    """
    if _ctx is not None:
        return await _get_ctx().flush()
    else:
        # If ctx is not initialized we still want to do the right thing by asyncio
        return await asyncio.sleep(0)
Python
async def _progress_task(self):
    """This helper function maintains a UCX progress loop."""
    while True:
        worker = self.weakref_worker()
        if worker is None or not worker.initialized:
            return
        worker.progress()
        del worker
        # Give other co-routines a chance to run.
        await asyncio.sleep(0)
Python
def decode_mp3(mp3_arr):
    """
    Decodes an array of uint8 representing an mp3 file.
    :rtype: np.array
    """
    container = av.open(io.BytesIO(mp3_arr.tobytes()))
    stream = next(s for s in container.streams if s.type == 'audio')
    # print(stream)
    a = []
    for i, packet in enumerate(container.demux(stream)):
        for frame in packet.decode():
            a.append(frame.to_ndarray().reshape(-1))
    waveform = np.concatenate(a)
    if waveform.dtype != 'float32':
        raise RuntimeError("Unexpected wave type")
    return waveform
Python
def pad_or_truncate(x, audio_length):
    """Pad all audio to specific length."""
    if len(x) <= audio_length:
        return np.concatenate((x, np.zeros(audio_length - len(x), dtype=np.float32)), axis=0)
    else:
        return x[0: audio_length]
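A short usage sketch tying `decode_mp3` and `pad_or_truncate` together. The file path, the 32 kHz sample rate and the 10-second target length are illustrative assumptions, not values taken from the source.

import numpy as np

SAMPLE_RATE = 32000              # assumed sample rate
CLIP_LENGTH = 10 * SAMPLE_RATE   # pad/crop every clip to 10 seconds

mp3_bytes = np.fromfile("example.mp3", dtype="uint8")  # hypothetical file
waveform = decode_mp3(mp3_bytes)
waveform = pad_or_truncate(waveform, CLIP_LENGTH)
assert waveform.shape == (CLIP_LENGTH,) and waveform.dtype == np.float32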
Python
def checkpoint_filter_fn(state_dict, model):
    """ convert patch embedding weight from manual patchify + linear proj to conv"""
    out_dict = {}
    if 'model' in state_dict:
        # For deit models
        state_dict = state_dict['model']
    state_dict = {k: v for k, v in state_dict.items()}
    if "time_new_pos_embed" not in state_dict:
        # we are working with ImageNet model
        _logger.info("Adapting pos embedding from ImageNet pretrained model to PaSST.")
        v = state_dict.pop("pos_embed")
        new_pos_embed, freq_new_pos_embed, time_new_pos_embed = adapt_image_pos_embed_to_passt(
            v, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
        state_dict["new_pos_embed"] = new_pos_embed
        state_dict["freq_new_pos_embed"] = freq_new_pos_embed
        state_dict["time_new_pos_embed"] = time_new_pos_embed

    for k, v in state_dict.items():
        if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
            # For old models that I trained prior to conv based patchification
            O, I, H, W = model.patch_embed.proj.weight.shape
            v = v.reshape(O, -1, H, W)
        elif k == 'pos_embed' and v.shape != model.pos_embed.shape:
            # this should never occur
            v = resize_pos_embed(
                v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)
        out_dict[k] = v
    return out_dict
Python
def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
    """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
    ImageNet-1k weights from https://github.com/facebookresearch/deit.
    """
    print("\n\n Loading DEIT BASE 384\n\n")
    model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
    model = _create_vision_transformer(
        'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **model_kwargs)
    return model
Python
def insert(self, value: int) -> None:
    """Insert a new element to the heap

    Args:
        value (int): A number

    Raises:
        Exception: When the heap is full
    """
    if self.size >= self.max_size:
        raise Exception("Heap is full")

    position = self.size
    self.heap[position] = value
    self.size += 1
    self.__sift_up(position)
Python
def extract_max(self) -> int:
    """Extracts the element with highest priority.

    i) Moves the last leaf to the root.
    ii) Bubble down the root until the element at the top is the new maximum.
    iii) Return the element which was the max.

    Returns:
        int: Value of element

    Raises:
        Exception: When heap is empty
    """
    if self.size == 0:
        raise Exception("Heap is empty")

    max_element = self.heap[0]
    self.heap[0] = self.heap[self.size - 1]
    self.__sift_down(0)
    self.size -= 1
    return max_element
Python
def remove(self, pos: int) -> int:
    """Removes the element at position `pos`

    Args:
        pos (int): Position of element to be removed

    Returns:
        int: Value of element removed
    """
    # keep the removed value so it can be returned, as documented
    removed_value = self.heap[pos]
    self.heap[pos] = float("+inf")
    self.__sift_up(pos)
    self.extract_max()
    return removed_value
Python
def change_priority(self, pos: int, value: int) -> None:
    """Changes the priority of an element

    Args:
        pos (int): Position of element
        value (int): New priority value
    """
    old_value = self.heap[pos]
    self.heap[pos] = value

    if value > old_value:
        self.__sift_up(pos)
    else:
        self.__sift_down(pos)
Python
def __sift_up(self, position: int) -> None:
    """Bubble up the elements to keep the heap constraints

    Args:
        position (int): Position of the inserted element
    """
    if position == 0:
        return

    parent_pos = self.__parent(position)
    if self.heap[position] > self.heap[parent_pos]:
        self.__swap(parent_pos, position)
        self.__sift_up(parent_pos)
Python
def __sift_down(self, position: int) -> None:
    """Bubble down the elements to keep the heap constraints

    Args:
        position (int): Position of the element to be bubbled down
    """
    # Return if there are no more children to compare to
    max_index = position

    left_child_pos = self.__left_child(position)
    if (
        left_child_pos < self.size
        and self.heap[max_index] < self.heap[left_child_pos]
    ):
        max_index = left_child_pos

    right_child_pos = self.__right_child(position)
    if (
        right_child_pos < self.size
        and self.heap[max_index] < self.heap[right_child_pos]
    ):
        max_index = right_child_pos

    if max_index != position:
        self.__swap(max_index, position)
        self.__sift_down(max_index)
Python
def __parent(self, pos: int) -> int:
    """Returns the parent position of a given element

    Args:
        pos (int): Position of element to find parent

    Returns:
        int: Position of parent
    """
    return (pos - 1) // 2
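The heap methods above assume a surrounding class with `heap`, `size`, `max_size`, `__swap`, `__left_child` and `__right_child`. The `MaxHeap` skeleton below is a hypothetical completion, shown only to make those assumptions explicit and to illustrate how `insert`/`extract_max` are meant to be used once the methods are attached.

class MaxHeap:
    def __init__(self, max_size: int) -> None:
        self.max_size = max_size
        self.size = 0
        self.heap = [0] * max_size  # fixed-size backing array

    def __swap(self, i: int, j: int) -> None:
        self.heap[i], self.heap[j] = self.heap[j], self.heap[i]

    def __left_child(self, pos: int) -> int:
        return 2 * pos + 1

    def __right_child(self, pos: int) -> int:
        return 2 * pos + 2

    # insert, extract_max, remove, change_priority, __sift_up,
    # __sift_down and __parent as defined above


# expected usage once the methods are attached to the class:
# h = MaxHeap(max_size=8)
# for v in (4, 9, 1, 7):
#     h.insert(v)
# assert h.extract_max() == 9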
Python
def train(self, train_data, validation_data=None, test_data=None, epochs=1, steps_per_epoch=None, callbacks=[]):
    """ Main training loop

    Args:
        train_data: an iterable of dictionaries from Input Layers to values {Input:data}.
            (calling iter on this object should yield an iterator for an epoch.)
        validation_data: an iterable of dictionaries from Input Layers to values {Input:data}.
        test_data: an iterable of dictionaries from Input Layers to values {Input:data}.
        epochs (int): number of training epochs.
        steps_per_epoch: number of steps in an epoch, if not None, epochs are incremented each time
            this number of steps pass even if the entire train_data has not been traversed.
        callbacks: ``Callback`` functions scheduled during the training.

    """
    # train loop properties
    step = Property("step", 0)
    epoch = Property("epoch", 1)
    epoch_step = Property("epoch_step", 0)
    last_loss = Property("last_loss", 0)
    train_loss = Property("train_loss", None)
    total_epochs = StaticProperty("total_epochs", value=epochs)
    test_loss = Property("test_loss", None)

    # TODO training loop only works with a single optimizer
    #  changing the optimizer during this loop is problematic
    #  without replacing the properties in the scheduler for the new optimizer
    optimizer_props: dict = self.optimizer_params[self.optimizer]

    properties = [step,
                  epoch_step,
                  epoch,
                  train_loss,
                  last_loss,
                  test_loss,
                  total_epochs
                  ] + list(optimizer_props.values())

    scheduler = Scheduler(model=self, properties=properties)

    for callback in callbacks:
        scheduler.register(callback)

    if steps_per_epoch is not None and train_data is not None:
        epoch_data = iter(train_data)

    if validation_data:
        validation_cb = Eval(target_property="validation_loss",
                             dataset=validation_data,
                             priority=-2)
        scheduler.register(validation_cb)

    if test_data:
        # keep the keyword consistent with the validation callback above
        test_cb = Eval(target_property="test_loss",
                       dataset=test_data,
                       priority=-1)
        scheduler.register(test_cb)

    # MAIN TRAINING LOOP
    scheduler.trigger(OnLoop(AT.START))
    try:
        while epoch.value <= epochs:
            # EPOCH START
            # restart iterator for an epoch
            if steps_per_epoch is None and train_data is not None:
                epoch_data = iter(train_data)
            epoch_step.value = 0
            total_loss = 0
            scheduler.trigger(OnEpoch(epoch.value, AT.START))
            while steps_per_epoch is None or epoch_step.value < steps_per_epoch:
                try:
                    if train_data is not None:
                        feed_dict = next(epoch_data)
                    else:
                        feed_dict = {}
                    feed_dict, param_feed = Model.parse_input(feed_dict, self.train_inputs)

                    optimizer_props = self.optimizer_params[self.optimizer]
                    for param_name in param_feed:
                        if param_name in optimizer_props:
                            optimizer_props[param_name].value = param_feed[param_name]

                    # updated here because we want this property to give us the current step, to know when
                    # a step ends use OnEveryStep(at=AT.END)
                    epoch_step.value += 1
                    step.value += 1

                    # update property values
                    for param_name in param_feed:
                        # add new properties if not in the scheduler
                        if param_name not in scheduler.props:
                            scheduler.observe(Property(name=param_name, value=param_feed[param_name]))
                        else:
                            # only update property if value changes
                            prop = scheduler.props[param_name]
                            # only change the value if this value is different, no need to trigger
                            # redundant updates on the properties that don't change value
                            if prop.value != param_feed[param_name]:
                                prop.value = param_feed[param_name]

                    scheduler.trigger(OnStep(step.value, AT.START))
                    scheduler.trigger(OnEpochStep(epoch_step.value, AT.START))

                    *outputs, loss = self.train_step(feed_dict)
                    if not np.isscalar(loss):
                        if isinstance(loss, list):
                            loss = np.mean([np.mean(l) for l in loss])
                        else:
                            loss = np.mean(loss)

                    last_loss.value = loss
                    total_loss += loss
                    train_loss.value = total_loss / epoch_step.value

                    scheduler.trigger(OnStep(epoch_step.value, AT.END))
                    scheduler.trigger(OnEpochStep(epoch_step.value, AT.END))
                except StopIteration:
                    break

            # EPOCH END
            scheduler.trigger(OnEpoch(epoch.value, AT.END))
            epoch.value += 1
    except StopTrain as e:
        logging.info("Training stopped: {}".format(str(e)))
    except Exception as e:
        logging.exception("Error: " + str(e))
        raise e

    scheduler.trigger(OnLoop(AT.END))
Python
def identity(x, name: str = None) -> tf.Tensor:
    """ Identity function

    Returns a tensor with the same content as the input tensor.

    Args:
        x (`Tensor`): The input tensor.
        name (`str`): name for this op

    Returns:
        tensor (`Tensor`): of the same shape, type and content of the input tensor.

    """
    return tf.identity(x, name=name)
Python
def variables(self):
    """ variables

    returns a list of **all** variables in the layer state

    Returns:
        variables (`List[tf.Variable]`): a list of all variables in the layer state

    """
    return list(self.var_dict().values())
Python
def filter_args(self, **kwargs):
    """ filter_args

    filters a given keyword argument dictionary removing any argument
    that is not present in the constructor for the current Layer type.

    Args:
        **kwargs (`Dict['str',Any]`): keyword arguments to be filtered

    Returns:
        new_kwargs (`Dict['str',Any]`): new filtered kwargs

    """
    new_kwargs = dict(kwargs)
    for key in kwargs:
        if key not in self.arg_names and not self.arg_spec.varkw:
            del new_kwargs[key]
    return new_kwargs
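To make the filtering logic concrete, here is a standalone sketch of the same idea using `inspect.getfullargspec`; the `Dense` class is hypothetical, and `arg_names`/`arg_spec` in the method above are assumed to be derived from the constructor signature in a similar way.

import inspect


class Dense:
    def __init__(self, n_units, activation=None, name="dense"):
        self.n_units = n_units
        self.activation = activation
        self.name = name


def filter_kwargs(cls, **kwargs):
    """Drop any keyword argument the constructor of `cls` does not accept."""
    spec = inspect.getfullargspec(cls.__init__)
    allowed = set(spec.args) | set(spec.kwonlyargs)
    if spec.varkw:  # a **kwargs catch-all in the constructor accepts everything
        return dict(kwargs)
    return {k: v for k, v in kwargs.items() if k in allowed}


kwargs = {"n_units": 4, "activation": None, "unknown_flag": True}
print(filter_kwargs(Dense, **kwargs))  # {'n_units': 4, 'activation': None}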
Python
def update(self, **kwargs):
    """ update

    Updates the config constructor argument dictionary and validates those parameters.

    Args:
        **kwargs (`Dict['str',Any]`): new values for constructor named arguments to be updated

    """
    self._validate_args(**kwargs)
    self.kwargs.update(kwargs)
Python
def as_function(self, name="layer_function", compile=False):
    """ returns a python function of a Tensorflow compiled graph as a callable

    !!! note
        This returns the entire graph as a function that terminates on this layer.
        If you want the function for this layer alone just get the `tf.function(layer.compute)`

    Args:
        name (`str`): function name to be returned
        compile (`bool`): if True, returns a `Tensorflow` compiled graph as a callable,
            else returns a python function.

    Returns:
        fn (`Callable`): either a Tensorflow static graph or a python callable function.

    """
    graph = Graph.build(inputs=None, outputs=self)
    return graph.as_function(name=name, compile=compile)
Python
def input(self):
    """ syntax sugar to return a single input if layers only have one input
    or the last layer if the current layer has more than one input

    Returns:
        layer (`Layer`): single input of the current layer

    """
    if hasattr(self, "inputs"):
        # if len(self.inputs) > 1:
        #     raise AttributeError("this layer has multiple inputs, use .inputs[i] instead")
        if len(self.inputs) > 0:
            return self.inputs[0]
        else:
            return None
Python
def _list_extra_dependencies_for_serialization(self, serialization_cache):
    """ Lists extra dependencies to serialize.

    Internal sub-classes can override this method to return extra dependencies

    Args:
        serialization_cache: A dictionary shared between all objects in the same
            object graph. This object is passed to both
            `_list_extra_dependencies_for_serialization` and
            `_list_functions_for_serialization`.

    Returns:
        A dictionary mapping attribute names to trackable objects.

    """
    dependencies = {}
    if self.inputs:
        if self.input_graph is not None:
            layers = self.input_graph.dependency_iter()
            dependencies = {
                f"{dep_layer.name}": dep_layer for dep_layer in layers
            }
    return dependencies
Python
def reuse_with(self, *layers, name=None):
    """ Reuse with a different input layer

    Calls reuse with on the wrapped layer and then creates a new wrapped layer
    around it, using the current tensor function.
    """
    new_wrapped = self.wrapped.reuse_with(*layers)

    # forward any previous attributes if we're wrapping over other WrapLayer instances
    attr_fwd = self.fwd_attr
    if isinstance(new_wrapped, Wrap):
        attr_fwd += new_wrapped.fwd_attr

    if name is None:
        name = self.name

    return Wrap(wrapped_layer=new_wrapped,
                n_units=self.n_units,
                wrap_fn=self.wrap_fn,
                fwd_attr=attr_fwd,
                name=name)
Python
def reset(self):
    """ reset

    resets the variable using its initializer

    Returns:
        an op that can be run to reinitialize the variable
    """
    with layer_scope(self):
        self.layer_state.variable.assign(self.init(self.shape))
        self.layer_state.counter.assign(0)
Python
def compute_shape(self):
    """
    !!! problem
        the problem in other libs like keras is that they always assume at least a 2d tensor,
        which is acceptable for neural networks, but breaks if we try to use this as a
        general computational block

    Returns:

    """
    input_shape = self.input.shape

    try:
        output_shape = fix_reshape_dimensions(input_shape, self.target_shape)
    except ValueError as e:
        raise ValueError(f"shape of {self.name} could not be determined") from e

    return tf.TensorShape(output_shape)
Python
def reuse_with(self, input_layer, name=None, transpose_weights=None, sparse_weights=None, shape=None):
    """ Reuses the current layer on a different input.
    """
    # if current layer is sharing variables, forward the sharing
    share_state_with = self if self.share_state_with is None else self.share_state_with

    if name is None:
        name = self.name

    if transpose_weights is None:
        transpose_weights = self.transpose_weights
    if sparse_weights is None:
        sparse_weights = self.sparse_weights

    return Linear(input_layer=input_layer,
                  n_units=self.n_units,
                  weight_init=self.weight_init,
                  weights=self.weights,
                  transpose_weights=transpose_weights,
                  sparse_weights=sparse_weights,
                  add_bias=self.add_bias,
                  weight_norm=self.weight_norm,
                  name=name,
                  share_state_with=share_state_with,
                  shape=shape)
Python
def init_state(self):
    """ Create a recurrent cell from the given config

    !!! bug "Dev note"
        The only stateful thing here is the cell, which is a layer. Since layers need to know
        their input layer for their state to be initialized, we need to give the cell a dummy input.

    Returns:
        state (`LayerState`): a state with a cell layer that performs the computations
    """
    layer_state = super().init_state()
    input_seq = self.input

    with layer_scope(self):
        # TODO add input_dim to RNNCells for syntax sugar
        # create dummy input which is used to init the cell init state without running the entire graph
        # I guess computing output shape would be useful here
        # TODO already have that so does it apply
        x0 = tf.ones_like(input_seq[0])

        if self.share_state_with is not None:
            cell = self.share_state_with.cell
            cell = cell.reuse_with(input_layer=x0,
                                   # previous_state=self.previous_state,
                                   regularized=self.regularized)
        else:
            cell = self.cell_config(x0, previous_state=self.previous_state)

        if cell.regularized != self.regularized:
            # create a new regularized cell if somehow the regularized parameter doesn't match the constructor
            cell = cell.reuse_with(input_layer=x0,
                                   # previous_state=self.previous_state,
                                   regularized=self.regularized)

        layer_state.cell = cell

        if self.previous_state is None:
            self.previous_state = cell.previous_state
            # if no previous state is provided we need to add it from current cell
            self._inputs += as_list(self.previous_state)
        self.n_units = cell.n_units

    return layer_state
Python
def as_concat(self):
    """ concatenates the sequence produced by a lookup and returns the current lookup
    viewed as a concat sequence layer

    Returns:
        seq_concat (`Wrap`): a `SeqConcat` layer as a view for the Lookup layer
    """
    return Wrap(self,
                n_units=None,
                wrap_fn=lambda current_layer: SeqConcat(current_layer, seq_size=self.seq_size),
                fwd_attr=["weights", "bias", "seq_size"],
                name="concat")
Python
def reuse_with(self, input_layer, name=None):
    """ Reuses the current layer on a different input.

    Uses the variables in this layer to create a new Layer instance with a different input_layer

    Args:
        input_layer: a ``Lookup`` Layer
        name: name for the new ``Layer``

    Return:
        ``Layer``: a new layer with shared variables with the current layer.

    """
    # if current layer is sharing variables, forward the sharing
    share_state_with = self if self.share_state_with is None else self.share_state_with

    if name is None:
        name = self.name

    return Lookup(input_layer,
                  seq_size=self.seq_size,
                  embedding_shape=self.embedding_shape,
                  batch_size=self.batch_size,
                  weights=self.weights,
                  weight_init=None,
                  dtype=self.dtype,
                  name=name,
                  share_state_with=share_state_with,
                  batch_padding=self.batch_padding)
Python
def compute(self, input_tensor, *previous_state):
    """ compute layer value based on input `Tensor` values

    Args:
        input_tensor: a `Tensor` or `Layer` input to the current cell
        *previous_state: (previous_h, previous_memory)

    Returns:
        `Constant`: a tensor with the cell's output

    """
    previous_h, previous_memory = previous_state
    output = self.output.compute(input_tensor, previous_h, previous_memory)
    return output
Python
def layer(n_units=None, name="layer", dtype=None, var_list=None):
    """ Decorator for functions that returns a layer configuration

    Returns:
        config (`LayerConfig`): instance that can be called on layers to create a new layer instance

    """

    def function_to_config(fn):
        if isinstance(fn, LayerConfig):
            return fn
        return Lambda.config(fn=fn, n_units=n_units, dtype=dtype, var_list=var_list, name=name)

    return function_to_config
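A hedged sketch of how the decorator above is intended to be used, assuming `Lambda.config` yields a `LayerConfig` that can later be called on input layers; the function and input names are illustrative only.

# hypothetical usage of the @layer decorator
@layer(n_units=2, name="add_layer")
def add(x, y):
    # the decorated function becomes the Lambda layer's compute function
    return x + y

# `add` is now a LayerConfig; calling it on input layers would build a Lambda layer:
# out = add(input_a, input_b)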
Python
def rms(x):
    """ Root mean square (RMS)

    Also known as the quadratic mean, it is defined as:

    $x_{\\mathrm{RMS}}=\\sqrt{\\frac{x_{1}^{2}+x_{2}^{2}+\\ldots+x_{n}^{2}}{n}}$

    In estimation theory, the root-mean-square deviation of an estimator is a measure of the imperfection of
    the fit of the estimator to the data.

    Args:
        x (`Tensor`): input tensor

    Returns:
        result (`Tensor`): scalar tensor with the result of applying the root mean square to the input tensor

    """
    return tf.sqrt(tf.reduce_mean(tf.square(x)))
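A small check of `rms` against the closed-form definition, assuming TensorFlow 2.x eager execution and NumPy are available; the input values are arbitrary.

import numpy as np
import tensorflow as tf

x = tf.constant([3.0, -4.0, 12.0])
expected = np.sqrt(np.mean(np.square([3.0, -4.0, 12.0])))  # sqrt((9 + 16 + 144) / 3)
assert np.isclose(rms(x).numpy(), expected)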
Python
def sparse_dense_multiply(sp_tensor, dense_tensor, name="sparse_multiply_dense"):
    """ element-wise sparse_multiply_dense

    !!! info
        Uses `sparse_dense_cwise_mul` from Tensorflow but returns a dense result
        and reshapes the result to match the shape of `sp_tensor`

    Args:
        sp_tensor (SparseTensor): a sparse tensor
        dense_tensor (Tensor): a dense tensor
        name (str): op name

    Returns:
        A dense tensor (Tensor): the result for the multiplication between the sparse and dense tensors

    """
    with tf.name_scope(name):
        mul = sparse_dense_cwise_mul(sp_tensor.indices,
                                     sp_tensor.values,
                                     sp_tensor.dense_shape,
                                     dense_tensor)

        mul = tf.reshape(mul, tf.shape(sp_tensor))
        return mul
Python
def sparse_sparse_dot(sp_tensor1, sp_tensor2, name="sparse_sparse_dot"):
    """ Returns the dot product between two tensors with the same shape

    Args:
        sp_tensor1: a ``SparseTensor``
        sp_tensor2: a ``SparseTensor``
        name: the name for this op

    Returns:
        ``Tensor``: a ``Tensor`` with the result of the dot product

    """
    with tf.name_scope(name):
        # sparse multiply computes the overlap between two sparse tensors
        radial_dif = sparse_sparse_multiply(sp_tensor1, sp_tensor2)
        dot_prod = tf.sparse.reduce_sum(radial_dif, axis=-1)
        return dot_prod
Python
def sparse_sparse_multiply(sp_tensor1, sp_tensor2):
    """ Element-wise multiplication of two sparse tensors

    !!! warning
        if the two sparse tensors don't overlap, returns an empty sparse tensor.

    Args:
        sp_tensor1: a `SparseTensor`
        sp_tensor2: a `SparseTensor`

    Returns:
        a `SparseTensor` with the element-wise multiplication of the two sparse tensors

    """
    overlap1 = ops.sparse_overlap(sp_tensor1, sp_tensor2)
    overlap2 = ops.sparse_overlap(sp_tensor2, sp_tensor1)

    values = tf.math.multiply(overlap1.values, overlap2.values)
    return tf.SparseTensor(overlap1.indices, values, overlap1.dense_shape)
Python
def sparse_dot(sp_tensor1, tensor2, name=None):
    """ Returns the dot product between two tensors with the same shape

    Args:
        sp_tensor1: a ``SparseTensor``
        tensor2: a ``Tensor`` or ``SparseTensor``
        name: the name for this op

    Returns:
        ``Tensor``: a ``Tensor`` with the result of the dot product

    """
    with tf.name_scope(name):
        if isinstance(tensor2, tf.Tensor):
            dense_values = tf.gather_nd(tensor2, sp_tensor1.indices)
            radial_dif = tf.math.multiply(sp_tensor1.values, dense_values)
            sp_radial_dif = tf.SparseTensor(indices=sp_tensor1.indices,
                                            values=radial_dif,
                                            dense_shape=sp_tensor1.dense_shape)
            dot_prod = tf.sparse.reduce_sum(sp_radial_dif, axis=-1)
            return dot_prod
        elif isinstance(tensor2, tf.SparseTensor):
            return sparse_sparse_dot(sp_tensor1, tensor2)
        else:
            raise TypeError(
                "inputs must be of type Tensor or SparseTensor: tensor2 == {t} found".format(t=type(tensor2)))
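A quick numeric check of `sparse_dot` for the dense second operand, assuming eager TensorFlow; the values are arbitrary.

import tensorflow as tf

# 2x3 sparse tensor with active entries (0,0)=1, (0,2)=2, (1,1)=3
sp = tf.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]],
                     values=[1.0, 2.0, 3.0],
                     dense_shape=[2, 3])
dense = tf.constant([[1.0, 0.0, 4.0],
                     [0.0, 5.0, 0.0]])

# row-wise dot product restricted to the sparse support:
# row 0 -> 1*1 + 2*4 = 9, row 1 -> 3*5 = 15
result = sparse_dot(sp, dense)
print(result.numpy())  # approximately [ 9. 15.]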
Python
def logit(x, dtype=tf.float32):
    """ logit

    The logit is a link function / a transformation of a parameter. It is the logarithm of the odds.

    $$
    logit(p) = log(p/(1-p))
    $$

    logit(0) = -inf, logit(1) = inf, and logit(p) for p<0 or p>1 yields nan.

    Args:
        x (`Tensor`): an input tensor
        dtype (`DType`): data type

    Returns:
        logit (`Tensor`): a tensor of the same shape as `x`

    """
    x = tf.convert_to_tensor(x, dtype)

    x = tf.math.divide(x, 1 - x)
    return tf.math.log(x)
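A small sanity check for `logit`, assuming eager TensorFlow: it should invert the sigmoid up to floating-point error.

import numpy as np
import tensorflow as tf

p = tf.constant([0.1, 0.5, 0.9])
z = logit(p)
# logit is the inverse of the sigmoid: sigmoid(logit(p)) == p
assert np.allclose(tf.math.sigmoid(z).numpy(), p.numpy(), atol=1e-6)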
Python
def categorical_cross_entropy(labels, logits, axis=-1, name="categorical_cross_entropy"):
    """ Categorical Cross entropy

    Measures the probability error in discrete classification tasks in which the classes are mutually exclusive.

    !!! warning
        This is to be used on the **logits** of a model, not on the predicted labels.
        Do not call this loss with the output of softmax.

    See also [from TensorFlow](https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits).

    Args:
        labels (Tensor): empiric probability distribution. Each row labels[i] must be a valid probability
            distribution (integrate to 1).
        logits (Tensor): unscaled log probabilities used to predict the labels with `softmax(logits)`
        axis (int): The class dimension. Defaulted to -1 which is the last dimension.
        name (str): op name

    Returns:
        tensor (`Tensor`): categorical (softmax) cross-entropy loss.
    """
    return tf.nn.softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits,
                                                   axis=axis,
                                                   name=name)
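A brief check that the wrapper matches the manual cross-entropy $-\sum_i y_i \log \mathrm{softmax}(z)_i$, assuming eager TensorFlow; labels and logits are arbitrary.

import numpy as np
import tensorflow as tf

labels = tf.constant([[0.0, 1.0, 0.0]])
logits = tf.constant([[2.0, 1.0, 0.1]])

loss = categorical_cross_entropy(labels, logits)
# the same quantity computed directly from the definition
manual = -tf.reduce_sum(labels * tf.nn.log_softmax(logits, axis=-1), axis=-1)
assert np.allclose(loss.numpy(), manual.numpy())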
Python
def binary_hinge(labels, logits):
    """ Binary Hinge Loss

    Measures the classification error for maximum-margin classification. Margin classifiers like
    Support Vector Machines (SVM) maximise the distance between the closest examples and the decision boundary
    separating the binary classes.

    The hinge loss is defined as:

    $$
    \\ell(y) = \\max(0, 1-t \\cdot y),
    $$

    where $t$ is the intended output (labels) and $y$ are the output logits from the
    classification decision function, not the predicted class label.

    Args:
        labels (`Tensor`): tensor with values -1 or 1. Binary (0 or 1) labels are converted to -1 or 1.
        logits (`Tensor`): unscaled log probabilities.

    Returns:
        tensor (`Tensor`): hinge loss float tensor
    """
    return tf.losses.hinge(labels, logits)
Python
def sparsemax_loss(logits, labels, name="sparsemax_loss"):
    """ Sparsemax Loss

    A loss function for the sparsemax activation function. This is similar to `tf.nn.softmax`,
    but able to output sparse probabilities.

    !!! info
        Applicable to multi-label classification problems and attention-based neural networks
        (e.g. for natural language inference)

    !!! cite "References"
        1. [From Softmax to Sparsemax: A Sparse Model of Attention and Multi-Label Classification](https://arxiv.org/abs/1602.02068)

    Args:
        labels (`Tensor`): the target dense labels (one hot encoded)
        logits (`Tensor`): unnormalized log probabilities
        name (str): op name

    Returns:
        loss (`Tensor`): sparsemax loss
    """
    with tf.name_scope(name):
        logits = tf.convert_to_tensor(logits)
        sparsemax = tx.sparsemax(logits)
        labels = tf.convert_to_tensor(labels, name="labels")

        shifted_logits = logits - tf.math.reduce_mean(logits, axis=1)[:, tf.newaxis]

        # sum over support (support = predicted labels)
        support = tf.cast(sparsemax > 0, sparsemax.dtype)
        sum_s = support * sparsemax * (shifted_logits - 0.5 * sparsemax)

        # - z_k + ||q||^2
        q_part = labels * (0.5 * labels - shifted_logits)

        return tf.math.reduce_sum(sum_s + q_part, axis=1)
Python
def sinkhorn_loss(target, predicted, epsilon, n_iter, cost_fn=None):
    """ Sinkhorn Loss

    Alias:
        * `tx.metrics.sinkhorn`

    !!! info
        Optimal Transport (OT) provides a framework from which one can define a more powerful geometry to compare
        probability distributions. This power comes, however, with a heavy computational price. The cost of computing
        OT distances scales at least in $O(d^3 log(d))$ when comparing two histograms of dimension $d$. The Sinkhorn
        algorithm alleviates this problem by solving a regularized OT problem in linear time.

    Given two measures with n points each with locations x and y, outputs an approximation of the Optimal Transport
    (OT) cost with regularization parameter epsilon; n_iter is the maximum number of steps in the sinkhorn loop.

    !!! cite "References"
        1. [Concerning nonnegative matrices and doubly stochastic matrices](https://msp.org/pjm/1967/21-2/p14.xhtml)
        2. [Sinkhorn Distances: Lightspeed Computation of Optimal Transport](https://papers.nips.cc/paper/4927-sinkhorn-distances-lightspeed-computation-of-optimal-transport.pdf)

    Args:
        predicted (`Tensor`): model distribution
        target (`Tensor`): ground_truth, empirical distribution
        epsilon (float): regularization term >0
        n_iter (int): number of sinkhorn iterations
        cost_fn (Callable): function that returns the cost matrix between y_pred and y_true,
            defaults to $|x_i-y_j|^p$.

    Returns:
        cost (`Tensor`): sinkhorn cost of moving the mass from the model distribution `y_pred` to the
            empirical distribution `y_true`.

    """
    return tx.sinkhorn(target, predicted, epsilon=epsilon, n_iter=n_iter, cost_fn=cost_fn)
Python
def sparse_ones(indices, dense_shape, dtype=tf.float32, name="sparse_ones"):
    """ Creates a new `SparseTensor` with the given indices having value 1

    Args:
        indices (`Tensor`): a rank 2 tensor with the `(row,column)` indices for the resulting sparse tensor
        dense_shape (`Tensor` or `TensorShape`): the output dense shape
        dtype (`tf.DType`): the tensor type for the values
        name (`str`): sparse_ones op

    Returns:
        sp_tensor (`SparseTensor`): a new sparse tensor with values set to 1
    """
    with tf.name_scope(name=name):
        indices = as_tensor(indices, tf.int64)
        dense_shape = as_tensor(dense_shape, tf.int64)
        indices_shape = indices.shape
        values = tf.ones([indices_shape[0]], dtype)
        return tf.SparseTensor(indices, values, dense_shape)
def sparse_zeros(indices, dense_shape, dtype=tf.float32, name="sparse_zeros"):
    """ Creates a new `SparseTensor` with the given indices having value 0

    Args:
        indices (`Tensor`): a rank 2 tensor with the `(row,column)` indices for the resulting sparse tensor
        dense_shape (`Tensor` or `TensorShape`): the output dense shape
        dtype (`tf.DType`): the tensor type for the values
        name (`str`): sparse_zeros op

    Returns:
        sp_tensor (`SparseTensor`): a new sparse tensor with values set to 0
    """
    with tf.name_scope(name=name):
        indices = as_tensor(indices, tf.int64)
        dense_shape = as_tensor(dense_shape, tf.int64)
        indices_shape = tf.shape(indices)
        values = tf.zeros([indices_shape[0]], dtype)
        return tf.SparseTensor(indices, values, dense_shape)
def sparse_indices(sp_values, name="sparse_indices"): """ Returns a `SparseTensor` with the values containing column indices for the active values on a given `SparseTensor`. !!! example "Use Case" To be used with ``embedding_lookup_sparse`` when we need two `SparseTensor` objects with the indices and values Args: sp_values (`SparseTensor`): a sparse tensor for which we extract the active indices. name (`str`): name for sparse_indices op Returns: sp_indices (`SparseTensor`): a sparse tensor with the column indices """ with tf.name_scope(name=name): if len(sp_values.get_shape().dims) == 1: [flat_indices] = tf.unstack(sp_values.indices, num=1, axis=-1) else: _, flat_indices = tf.unstack(sp_values.indices, num=2, axis=-1) sp_indices = tf.SparseTensor(sp_values.indices, flat_indices, sp_values.dense_shape) return sp_indices
def repeat(x, n, name="repeat"): """ Repeats the values of a tensor along the last dimension Args: x (`Tensor`): input tensor n (`int`): number of repetitions of each element name (`str`): name for the repeat op Returns: tensor (`Tensor`): tensor with shape [shape[:-1, ], shape[-1:, ] * n] """ with tf.name_scope(name): x = tf.convert_to_tensor(x) n = as_tensor(n, dtype=x.dtype) shape = tf.shape(x, out_type=x.dtype) flat_x = tf.reshape(x, [-1]) rep_x = tf.tile(tf.expand_dims(flat_x, -1), tf.stack([1, n])) new_shape = tf.concat([shape[:-1, ], shape[-1:, ] * n], axis=-1) rep_x = tf.reshape(rep_x, new_shape) return rep_x
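# A quick runnable sketch (hypothetical `_demo_repeat` helper): each value along the last axis is
# repeated `n` times. An integer tensor is used so that `tf.shape(x, out_type=x.dtype)` inside
# `repeat` receives a valid integer out_type.
def _demo_repeat():
    x = tf.constant([[1, 2], [3, 4]])
    # -> [[1, 1, 2, 2], [3, 3, 4, 4]]
    return repeat(x, 2)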
def matrix_indices(index_tensor, dtype=tf.int64, sort_indices=True, name="matrix_indices"): """ Transforms a batch of column indices into a batch of matrix indices Args: index_tensor (`Tensor`): a tensor with shape `(b,n)` with a batch of `n` column indices. dtype (`DType`): the output dtype for the indices. Defaults to `int64`. sort_indices (`bool`): if `True`, output indices are sorted in canonical row-major order. name (`str`): name for this op. Returns: tensor (`Tensor`): tensor with shape `[b,2]` for each index in the input tensor with the corresponding matrix indices """ with tf.name_scope(name): index_tensor = as_tensor(index_tensor, dtype) if len(index_tensor.shape) < 2: index_tensor = tf.expand_dims(index_tensor, 0) shape = tf.shape(index_tensor, out_type=dtype) row_indices = tf.range(0, shape[0]) row_indices = repeat(row_indices, shape[-1]) # sort ascending if sort_indices: sorted_indices, _ = tf.nn.top_k(tf.cast(index_tensor, tf.int32), k=tf.cast(shape[-1], tf.int32)) sorted_indices = tf.reverse(sorted_indices, axis=[-1]) col_indices = sorted_indices else: col_indices = index_tensor col_indices = tf.reshape(col_indices, [-1]) col_indices = tf.cast(col_indices, dtype) indices = tf.stack([row_indices, col_indices], axis=-1) return indices
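# A small runnable sketch (hypothetical `_demo_matrix_indices` helper): a batch of column indices,
# one row per sample, is turned into (row, column) coordinates in row-major order.
def _demo_matrix_indices():
    cols = tf.constant([[1, 0], [3, 2]])
    # -> [[0, 0], [0, 1], [1, 2], [1, 3]]
    return matrix_indices(cols)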
def dense_one_hot(column_indices, num_cols, dtype=tf.float32, reduce=True, name="dense_one_hot"):
    """Transforms a batch of indices to a dense `Tensor` by adding the `one-hot` encoding for each index.

    Example:
        ```python
        indices = [[0],[1]]
        dense_shape = [2,2]

        dense_one_hot = [[1,0],[0,1]]
        ```

    Args:
        column_indices: a dense `Tensor` with the active indices for each sample (row).
        num_cols: number of columns for the one-hot encoding
        dtype: the type for the output tensor.
        reduce (`bool`): if `True` and each sample has more than one index, sums the one-hot encodings of each
            sample over the index dimension (axis 1), producing a single (multi-hot) row per sample.
        name: name for this op

    Returns:
        `Tensor`: A dense `Tensor` with a `one-hot encoding` for the given indices.
    """
    with tf.name_scope(name=name):
        column_indices = as_tensor(column_indices, tf.int64)
        one_hot_dense = tf.one_hot(column_indices, depth=num_cols, dtype=dtype)

        if column_indices.get_shape().ndims >= 2 and reduce:
            one_hot_dense = tf.math.reduce_sum(one_hot_dense, axis=1)

        return one_hot_dense
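# A small runnable sketch (hypothetical `_demo_dense_one_hot` helper): two samples with two active
# columns each are encoded as multi-hot rows.
def _demo_dense_one_hot():
    # -> [[1., 1., 0., 0.], [0., 0., 1., 1.]]
    return dense_one_hot(column_indices=[[0, 1], [2, 3]], num_cols=4)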
def sparse_matrix_indices(column_indices, num_cols, dtype=tf.float32, name="sparse_one_hot"):
    """Transforms a batch of column indices to a one-hot encoding `SparseTensor`.

    Example:
        ``` python
        indices = [[0,1,4],
                   [1,2,6]]

        sp_one_hot = sparse_matrix_indices(indices, num_cols=10)

        expected = tf.SparseTensor(indices=[[0,0],
                                            [0,1],
                                            [0,4],
                                            [1,1],
                                            [1,2],
                                            [1,6]],
                                   values=[1,1,1,1,1,1],
                                   dense_shape=[2,10])
        ```

    Args:
        column_indices (`Tensor`): a dense tensor with the indices to be active for each sample (row)
        num_cols (`int`): number of columns for the one-hot encoding
        dtype (`tf.DType`): the type for the output values.
        name (`str`): name for this op

    Returns:
        sp_tensor (`SparseTensor`): a sparse tensor with the one hot encoding for the given indices
    """
    with tf.name_scope(name=name):
        column_indices = as_tensor(column_indices, tf.int64)
        indices = matrix_indices(column_indices, dtype=tf.int64)

        dense_shape = tf.cast([tf.shape(column_indices)[0], num_cols], dtype=tf.int64)

        return sparse_ones(indices, dense_shape, dtype)
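# A small runnable sketch (hypothetical `_demo_sparse_matrix_indices` helper): the one-hot
# SparseTensor built from a batch of column indices, densified for inspection.
def _demo_sparse_matrix_indices():
    sp = sparse_matrix_indices(column_indices=[[0, 1, 4], [1, 2, 6]], num_cols=10)
    # rows: [1,1,0,0,1,0,0,0,0,0] and [0,1,1,0,0,0,1,0,0,0]
    return tf.sparse.to_dense(sp)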
def dropout(tensor, noise_shape=None, random_mask=None, probability=0.1, scale=True, seed=None, return_mask=False, name="dropout"):
    """ With probability `probability`, outputs `0`; otherwise outputs the input element.

    If ``scale`` is True, the input elements are scaled up by `1 / (1-probability)` so that the expected sum of
    the activations is unchanged.

    By default, each element is kept or dropped independently.  If `noise_shape`
    is specified, it must be
    [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
    will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
    and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
    kept independently and each row and column will be kept or not kept together.

    Args:
        tensor (`Tensor`): an input tensor
        noise_shape (`Tensor`): A 1-D `Tensor` of type `int32`, representing the shape for randomly generated drop flags
        return_mask (`bool`): if `True`, returns the random mask used
        random_mask (`Tensor`): a tensor used to create the random bernoulli mask
        probability (`float` or `Tensor`): A scalar `Tensor` with the same type as x. The probability that each element
            is dropped.
        scale (`bool`): if `True`, rescales the kept elements by `1 / (1-probability)`
        seed (`int`): A Python integer with the random number generator seed
        name (`str`): a name for this operation

    Returns:
        tensor (`Tensor`): output tensor with the same `DType` as the input

    Raises:
        ValueError: if `probability` is not in `[0, 1)` or if `x` is not a floating point tensor.
    """
    with tf.name_scope(name):
        tensor = tf.convert_to_tensor(tensor, name="x")
        if random_mask is not None:
            random_mask = as_tensor(random_mask, tensor.dtype)

        if not tensor.dtype.is_floating:
            try:
                tensor = tf.cast(tensor, tf.float32)
            except Exception:
                raise ValueError("x has to be a floating point tensor since it might be scaled. "
                                 "Got a %s tensor instead and could not cast it" % tensor.dtype)

        if not 0 <= probability < 1:
            raise ValueError("drop probability must be a scalar tensor or a float in the "
                             "range [0, 1), got %g" % probability)

        # Early return if nothing needs to be dropped.
        if isinstance(probability, float) and probability == 0:
            if return_mask:
                return tensor, None
            else:
                return tensor
        elif isinstance(probability, float) and probability == 1:
            zeros = tf.zeros_like(tensor)
            if return_mask:
                return zeros, None
            else:
                return zeros

        probability = tf.convert_to_tensor(
            probability,
            dtype=tensor.dtype,
            name="drop_probability")
        probability.get_shape().assert_is_compatible_with(tf.TensorShape([]))

        # Do nothing if we know drop_probability == 0
        const_val = tensor_util.constant_value(probability)
        if const_val == 0:
            if return_mask:
                return tensor, None
            else:
                return tensor
        elif const_val == 1:
            zeros = tf.zeros_like(tensor)
            if return_mask:
                return zeros, None
            else:
                return zeros

        noise_shape = _get_noise_shape(tensor, noise_shape)

        if random_mask is None:
            with tf.name_scope(name="random_mask"):
                keep_prob = 1 - probability
                random_state = tf.random.uniform(noise_shape, seed=seed, dtype=tensor.dtype)
                mask = keep_prob + random_state
                random_mask = tf.math.floor(mask, name="binary_mask")

        if scale:
            ret = tf.math.divide(tensor, tf.math.maximum(1 - probability, 1e-10)) * random_mask
        else:
            ret = tensor * random_mask

        if not tf.executing_eagerly():
            ret.set_shape(tensor.get_shape())

        if return_mask:
            return ret, random_mask
        else:
            return ret
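# A small runnable sketch (hypothetical `_demo_dropout` helper, assumes eager execution):
# drops roughly half of the entries and rescales the surviving ones by 1/(1 - 0.5).
def _demo_dropout():
    x = tf.ones([2, 4])
    y, mask = dropout(x, probability=0.5, return_mask=True, seed=42)
    # kept entries are 2.0, dropped entries are 0.0; `mask` holds the corresponding 1s and 0s
    return y, mask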
def alpha_dropout(tensor, noise_shape=None, random_mask=None, probability=0.1, seed=None, return_mask=False, name="alpha_dropout"):
    """ Alpha Dropout keeps mean and variance of inputs in order to ensure the self-normalization after dropout.
    Alpha dropout is proposed for Scaled Exponential Linear Units (SELUs) because it randomly sets activations to
    the negative saturation value rather than 0.

    The multiplicative noise will have standard deviation $\\sqrt{\\frac{probability}{(1-probability)}}$

    !!! cite "References"
        1. [Self-Normalizing Neural Networks](https://arxiv.org/pdf/1706.02515.pdf)


    Args:
        tensor (`Tensor`): A floating point tensor.
        noise_shape (`Tensor`): A 1-D `Tensor` of type `int32`, representing the shape for randomly generated drop flags
        return_mask (`bool`): if true, returns the random mask used
        random_mask (`Tensor`): a tensor used to create the random bernoulli mask
        probability (`float` or `Tensor`): A scalar `Tensor` with the same type as x. The probability that each element
            is dropped.
        seed (`int`): A Python integer with the random number generator seed
        name (`str`): a name for this operation (optional)

    Returns:
        result (`Tensor`): a tensor with the same shape as the input with the dropped units set to negative values

    """
    tensor = tf.convert_to_tensor(tensor, name="x")
    with tf.name_scope(name):
        if random_mask is not None:
            random_mask = as_tensor(random_mask, tensor.dtype)

        if not tensor.dtype.is_floating:
            try:
                tensor = tf.cast(tensor, tf.float32)
            except Exception:
                raise ValueError("x has to be a floating point tensor since it might be scaled. "
                                 "Got a %s tensor instead and could not cast it" % tensor.dtype)

        if not 0 <= probability < 1:
            raise ValueError("drop probability must be a scalar tensor or a float in the "
                             "range [0, 1), got %g" % probability)

        # Early return if nothing needs to be dropped.
        if isinstance(probability, float) and probability == 0:
            if return_mask:
                return tensor, None
            else:
                return tensor
        elif isinstance(probability, float) and probability == 1:
            zeros = tf.zeros_like(tensor)
            if return_mask:
                return zeros, None
            else:
                return zeros

        probability = tf.convert_to_tensor(
            probability,
            dtype=tensor.dtype,
            name="drop_probability")
        probability.get_shape().assert_is_compatible_with(tf.TensorShape([]))

        # Do nothing if we know drop_probability == 0
        const_val = tensor_util.constant_value(probability)
        if const_val == 0:
            if return_mask:
                return tensor, None
            else:
                return tensor
        elif const_val == 1:
            zeros = tf.zeros_like(tensor)
            if return_mask:
                return zeros, None
            else:
                return zeros

        noise_shape = _get_noise_shape(tensor, noise_shape)

        if random_mask is None:
            with tf.name_scope(name="random_mask"):
                keep_prob = 1 - probability
                random_state = tf.random.uniform(noise_shape, seed=seed, dtype=tensor.dtype)
                mask = keep_prob + random_state
                random_mask = tf.math.floor(mask, name="binary_mask")

        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        alpha_p = -alpha * scale

        # Get affine transformation params
        a = ((1 - probability) * (1 + probability * alpha_p ** 2)) ** -0.5
        b = -a * alpha_p * probability

        # Apply mask
        x = tensor * random_mask + alpha_p * (1 - random_mask)

        # Do affine transformation
        result = a * x + b

        # return the mask when requested so that callers (e.g. sparse_dropout) can reuse it
        if return_mask:
            return result, random_mask
        else:
            return result
def binary_random_mask(tensor, mask_probability=0.0, seed=None): """ Creates a binary mask with the same shape as the given tensor, randomly generated from the given mask probability. Args: tensor (`Tensor`): tensor for which we would like to create a mask mask_probability (`float`, `Tensor`): scalar tensor or float with probability of masking a given value seed (`int`): seed for random number generator Returns: binary_mask (`Tensor`): a tensor with values `0` or `1` with the same shape as the input tensor """ with tf.name_scope(name="random_mask"): tensor = as_tensor(tensor) noise_shape = _get_noise_shape(tensor, None) keep_prob = 1 - mask_probability random_state = tf.random.uniform(noise_shape, seed=seed, dtype=tensor.dtype) mask = keep_prob + random_state random_mask = tf.math.floor(mask, name="binary_mask") return random_mask
def sparse_dropout(sp_tensor, probability=0.2, scale=True, seed=None, mask=None, return_mask=False, alpha=False, name="sparse_dropout"):
    """ Performs a dropout on a `SparseTensor`.

    With probability `probability`, outputs `0`; otherwise outputs the input element. If `scale` is `True`,
    the kept values are scaled up by `1 / (1-probability)` so that the expected sum is unchanged.

    Args:
        sp_tensor (`SparseTensor`): a sparse tensor on which the dropout is performed.
        mask (`Tensor`): a binary random mask to be applied to the values of this tensor
        return_mask (`bool`): if true returns the random_mask used to perform dropout (result,random_mask)
        probability (`float`, `Tensor`): A scalar tensor with the same type as x. The probability that each element
            is dropped.
        scale (`bool`): if `True`, rescales the kept values by `1 / (1-probability)`, else simply drops without
            rescaling
        seed (`int`): A Python integer used as seed. (See `TensorFlow` documentation
            for ``tf.set_random_seed`` for behavior.)
        alpha (`bool`): if True uses `alpha_dropout` instead of `dropout` in the inputs
        name (`str`): A name for this operation (optional).

    Returns:
        sp_tensor (`SparseTensor`): the sparse tensor with the dropped entries removed, or a
        `(sp_tensor, random_mask)` tuple when `return_mask` is `True`.
    """
    with tf.name_scope(name=name):
        dense_shape = sp_tensor.dense_shape

        if not sp_tensor.values.dtype.is_floating:
            raise ValueError("sp_tensor has to be a floating point tensor since its values are going to"
                             " be scaled. Got a %s tensor instead." % sp_tensor.dtype)
        if not 0 <= probability < 1:
            raise ValueError("probability must be a scalar tensor or a float in the "
                             "range [0, 1), got %g" % probability)
        probability = tf.convert_to_tensor(probability,
                                           dtype=sp_tensor.dtype,
                                           name="drop_probability")

        if alpha:
            drop_values = alpha_dropout(tensor=sp_tensor.values,
                                        random_mask=mask,
                                        probability=probability,
                                        return_mask=return_mask,
                                        seed=seed
                                        )
        else:
            drop_values = dropout(tensor=sp_tensor.values,
                                  random_mask=mask,
                                  probability=probability,
                                  scale=scale,
                                  return_mask=return_mask,
                                  seed=seed)

        if return_mask:
            drop_values, mask = drop_values

        not_zero = tf.math.not_equal(drop_values, 0)
        values = tf.boolean_mask(drop_values, not_zero)
        indices = tf.boolean_mask(sp_tensor.indices, not_zero)

        new_tensor = tf.SparseTensor(indices, values, dense_shape)

        if return_mask:
            return new_tensor, mask
        else:
            return new_tensor
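# A small runnable sketch (hypothetical `_demo_sparse_dropout` helper, assumes eager execution):
# entries that are dropped are removed from the sparse tensor entirely, kept values are rescaled.
def _demo_sparse_dropout():
    sp = tf.SparseTensor(indices=[[0, 0], [0, 2], [1, 1]],
                         values=[1., 2., 3.],
                         dense_shape=[2, 4])
    return sparse_dropout(sp, probability=0.5, seed=42)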
def apply_gate(tensor, gate):
    """ Applies a gate tensor to the given input

    if the last dimension of the input tensor is a multiple of the last dimension of the gate tensor,
    broadcasting is used to apply the gate evenly across the input tensor.

    Example:
        ```python
        tx.apply_gate(tf.ones([1,4]),[1.,0.])

        [[1., 1., 0., 0.]]
        ```

    Args:
        tensor (`Tensor`): an input tensor
        gate (`Tensor`): float tensor that is multiplied by the input tensor. The last dimension of the input tensor
            should either match the last dimension of the gate tensor or be a multiple of it.


    Returns:
        gated (`Tensor`): input tensor gated using the given gate weights

    """
    with tf.name_scope("apply_gate"):
        tensor = as_tensor(tensor)
        gate = as_tensor(gate)

        n_gates = tf.shape(gate)[-1]
        n_units = tf.shape(tensor)[-1]
        feature_dim = n_units // n_gates

        if isinstance(tensor, tf.SparseTensor):
            tensor_in = tf.sparse.reshape(tensor, [-1, n_gates, feature_dim])
            gate = tf.expand_dims(gate, -1)
            gated = mx.sparse_dense_multiply(tensor_in, gate)
        else:
            tensor_in = tf.reshape(tensor, [-1, n_gates, feature_dim])
            gated = tensor_in * tf.expand_dims(gate, -1)

        out_shape = tf.stack([-1, n_units])
        output = tf.reshape(gated, out_shape)

        return output
def empty_sparse_tensor(dense_shape, dtype=tf.float32, name="empty_sp_tensor"): """ Creates an empty `SparseTensor` Args: dense_shape (`TensorShape`): a 1-D tensor, python list, or numpy array with the output shape for the sparse tensor dtype (`DType`): the dtype of the values for the empty tf.SparseTensor name (`str`): a name for this operation Returns: sp_tensor (`SparseTensor`): an empty sparse tensor with a given shape """ with tf.name_scope(name): dense_shape = tf.convert_to_tensor(dense_shape, name="dense_shape", dtype=tf.int64) index_shape = dense_shape.get_shape().with_rank(1) empty_indices = tf.ones([0, index_shape[0]], dtype=tf.int64) empty_values = tf.ones([0], dtype=dtype) return tf.SparseTensor(empty_indices, empty_values, dense_shape)
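# A tiny runnable sketch (hypothetical `_demo_empty_sparse_tensor` helper): an empty sparse tensor
# densifies to all zeros.
def _demo_empty_sparse_tensor():
    sp = empty_sparse_tensor([2, 4])
    # -> [[0., 0., 0., 0.], [0., 0., 0., 0.]]
    return tf.sparse.to_dense(sp)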
def embedding_lookup_sparse(params, sp_tensor, combiner=None, max_norm=None, name="embedding_lookup_sparse"):
    """Computes embeddings for the given ids and weights.

    !!! info
        assumes that there is at least one id for each row in the dense tensor
        represented by sp_ids (i.e. there are no rows with empty features), and that
        all the indices of sp_ids are in canonical row-major order. It also assumes that all id values lie in the range
        [0, p0), where p0 is the sum of the size of params along dimension 0.

    !!! note
        in tensorflow's implementation, sparse gradients do not propagate through gather.

    Args:
        params: A single tensor representing the complete embedding tensor, or a
            list of P tensors all of same shape except for the first dimension,
            representing sharded embedding tensors. Alternatively, a
            `PartitionedVariable`, created by partitioning along dimension 0. Each
            element must be appropriately sized for the given `partition_strategy`.
        sp_tensor (`SparseTensor`): N x M `SparseTensor` with the ids and weights where N is typically batch size
            and M is arbitrary.
        combiner: A string specifying the reduction op. Currently "mean", "sqrtn" and "sum" are supported. "sum"
            computes the weighted sum of the embedding results for each row. "mean" is the weighted sum divided by the
            total weight. "sqrtn" is the weighted sum divided by the square root of the sum of the squares of the
            weights.
        max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining.
        name (`str`): op name

    Returns:
        tensor (`Tensor`): dense tensor representing the combined embeddings for the sparse ids. For each row in the
        dense tensor represented by `sp_ids`, the op looks up the embeddings for all ids in that row, multiplies them
        by the corresponding weight, and combines these embeddings as specified.

    Raises:
        TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is
            neither `None` nor `SparseTensor`.
        ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}.
    """
    if combiner is None:
        logging.warning("combiner was not specified, defaulting to \"mean\"")
        combiner = "mean"
    if combiner not in ("mean", "sqrtn", "sum"):
        raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
    if isinstance(params, PartitionedVariable):
        params = list(params)  # Iterate to get the underlying Variables.
    if not isinstance(params, list):
        params = [params]
    if not isinstance(sp_tensor, tf.SparseTensor):
        raise TypeError("sp_ids must be SparseTensor")

    with tf.name_scope(name) as name:
        segment_ids = sp_tensor.indices[:, 0]
        if segment_ids.dtype != tf.int32:
            segment_ids = tf.cast(segment_ids, tf.int32)

        ids = sp_tensor.indices[:, -1]

        # ids, idx = tf.unique(ids)

        embeddings = tf.nn.embedding_lookup(
            params=params,
            ids=ids,
            max_norm=max_norm)

        # ***
        # this second lookup causes problems because sparse gradients don't propagate though gather
        # embeddings = embedding_lookup(embeddings, idx)
        # embeddings, _ = gather_dynamic(embeddings, idx)
        # ***

        weights = sp_tensor.values
        if weights.dtype != embeddings.dtype:
            weights = tf.cast(weights, embeddings.dtype)

        # Reshape weights to allow broadcast
        ones = tf.fill(
            tf.expand_dims(tf.rank(embeddings) - 1, 0), 1)
        bcast_weights_shape = tf.concat(
            [tf.shape(weights), ones], 0)

        orig_weights_shape = weights.get_shape()
        weights = tf.reshape(weights, bcast_weights_shape)

        # Set the weight shape, since after reshaping to bcast_weights_shape,
        # the shape becomes None.
if embeddings.get_shape().ndims is not None: weights.set_shape(orig_weights_shape.concatenate( [1 for _ in range(embeddings.get_shape().ndims - 1)])) embeddings *= weights if combiner == "sum": embeddings = tf.math.segment_sum(embeddings, segment_ids, name=name) elif combiner == "mean": embeddings = tf.math.segment_sum(embeddings, segment_ids) weight_sum = tf.math.segment_sum(weights, segment_ids) embeddings = tf.math.divide_no_nan(embeddings, weight_sum, name=name) elif combiner == "sqrtn": embeddings = tf.math.segment_sum(embeddings, segment_ids) weights_squared = tf.math.pow(weights, 2) weight_sum = tf.math.segment_sum(weights_squared, segment_ids) weight_sum_sqrt = tf.math.sqrt(weight_sum) embeddings = tf.math.divide_no_nan(embeddings, weight_sum_sqrt, name=name) else: assert False, "Unrecognized combiner" return embeddings
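# A small runnable sketch (hypothetical `_demo_embedding_lookup_sparse` helper): row 0 uses
# ids {0, 2} with weight 1.0 each, row 1 uses id 3 with weight 2.0; "sum" combines the weighted
# embeddings per row.
def _demo_embedding_lookup_sparse():
    embeddings = tf.reshape(tf.range(12, dtype=tf.float32), [4, 3])
    sp = tf.SparseTensor(indices=[[0, 0], [0, 2], [1, 3]],
                         values=[1., 1., 2.],
                         dense_shape=[2, 4])
    # -> [[6., 8., 10.], [18., 20., 22.]]
    return embedding_lookup_sparse(embeddings, sp, combiner="sum")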
def sparse_overlap(sp_tensor1, sp_tensor2, name="sparse_overlap"):
    """ sparse overlap

    Returns a `SparseTensor` with the indices at which the two sparse tensors overlap, keeping the
    values of the first one.

    Args:
        sp_tensor1 (`SparseTensor`): a sparse tensor
        sp_tensor2 (`SparseTensor`): another sparse tensor
        name (`str`): name for sparse_overlap op

    Returns:
        sp_tensor (`SparseTensor`): sparse tensor with the overlapping indices and the values of `sp_tensor1`

    """
    with tf.name_scope(name):
        ones1 = mx.sparse_ones(sp_tensor1.indices, sp_tensor1.dense_shape)
        ones2 = mx.sparse_ones(sp_tensor2.indices, sp_tensor2.dense_shape)
        index_union = tf.sparse.add(ones1, ones2)

        index_filter = tf.equal(index_union.values, 2.)

        zeros1 = sparse_zeros(index_union.indices, index_union.dense_shape, sp_tensor1.values.dtype)
        expand1 = tf.sparse.add(zeros1, sp_tensor1)

        filtered = tf.sparse.retain(expand1, index_filter)
        return filtered
def sort_by_first(tensor1, tensor2, ascending=True, name="sort_by_first"):
    """ sort_by_first

    Sorts two tensors: the first is sorted by value, and the second is reordered with the permutation
    that sorts the first.

    Args:
        tensor1 (`Tensor`): tensor that determines the order by which the second is sorted
        tensor2 (`Tensor`): tensor to be sorted according to the sorting of the first
        ascending (`Bool`): if True sorts by ascending order of value
        name (`str`): name of the op

    Returns:
        tensor1, tensor2 (`Tensor`,`Tensor`): sorted first tensor, second tensor sorted according to the indices of the
        first tensor sorting

    """
    with tf.name_scope(name=name):
        tensor1 = as_tensor(tensor1)
        tensor2 = as_tensor(tensor2)

        sorted_tensor1, sorted_tensor1_indices = tf.nn.top_k(tensor1, k=tf.shape(tensor1)[-1])
        if ascending:
            sorted_tensor1 = tf.reverse(sorted_tensor1, axis=[-1])
            sorted_tensor1_indices = tf.reverse(sorted_tensor1_indices, axis=[-1])

        # TODO not sure what the performance implication of this check is when converted to graph
        if len(tensor1.shape.as_list()) == 1:
            sorted_tensor1_indices = tf.expand_dims(sorted_tensor1_indices, 1)
        else:
            sorted_tensor1_indices = matrix_indices(sorted_tensor1_indices, sort_indices=False)

        sorted_values = tf.gather_nd(tensor2, sorted_tensor1_indices)
        sorted_values = tf.reshape(sorted_values, tf.shape(tensor2))

        return sorted_tensor1, sorted_values
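# A small runnable sketch (hypothetical `_demo_sort_by_first` helper): the keys are sorted
# ascending and the values follow the same permutation.
def _demo_sort_by_first():
    keys = tf.constant([[3., 1., 2.]])
    values = tf.constant([[30., 10., 20.]])
    # -> ([[1., 2., 3.]], [[10., 20., 30.]])
    return sort_by_first(keys, values)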
def ranges(range_sizes, name="ranges"):
    """ ranges

    Similar to concatenating the outputs of multiple `tf.range` calls, one for each element of a given
    1D tensor of range sizes.

    Example:
        ```python
        ranges([1,2,4])

        [0,0,1,0,1,2,3]
        ```

        the individual ranges are `[0]`, `[0,1]`, and `[0,1,2,3]`

    Args:
        range_sizes (`Tensor`): 1D tensor with range sizes
        name (`str`): ranges op name

    Returns:
        ranges (`Tensor`): a 1D `Tensor` with `tf.reduce_sum(range_sizes)` dimensions

    """
    with tf.name_scope(name):
        range_sizes = tf.convert_to_tensor(range_sizes)
        tf.ensure_shape(range_sizes, tf.TensorShape([None]))
        tf.debugging.assert_greater(tf.shape(range_sizes)[0], 0,
                                    message="range_sizes cannot be empty")
        num_ranges = tf.shape(range_sizes)[0]

        # get maximum repeat length in x
        max_len = tf.math.reduce_max(range_sizes)
        x = tf.range(max_len)

        # tile it to the maximum repeat length [maxlen x maxlen] now
        x_repeat = tf.stack([num_ranges, 1], axis=0)
        x_tiled = tf.tile(tf.expand_dims(x, 0), x_repeat)

        # create a sequence mask using x
        # this will create a boolean matrix of shape [xlen, max_len]
        # where result[i,j] is true if j < x[i].
        mask = tf.sequence_mask(range_sizes, max_len)

        # mask the elements based on the sequence mask
        return tf.boolean_mask(x_tiled, mask)
def gather_sparse(sp_tensor, ids, name="gather_sparse"):
    """ gather_sparse

    gather rows from a sparse tensor by the given ids and returns a sparse tensor

    !!! warning
        gathering from a `SparseTensor` is inefficient

    Example:
        ```python
        gather_sparse(sp_tensor,[1,1,4])
        ```

        returns a `[3,sp_tensor.dense_shape[-1]]` `SparseTensor`

    Args:
        sp_tensor (`SparseTensor`): sparse tensor
        ids (`Tensor`): an int tensor with the ids of the rows to be returned
        name (`str`): op name

    Returns:
        sp_gathered (`SparseTensor`): a sparse tensor with the gathered rows.

    """
    with tf.name_scope(name=name):
        ids = tf.cast(ids, tf.int64)
        ids = tf.reshape(ids, [-1])

        # count columns and compute row coordinates
        sp_column_ones = sparse_ones(sp_tensor.indices, sp_tensor.dense_shape, dtype=tf.int64)
        col_count = tf.sparse.reduce_sum(sp_column_ones, axis=-1)
        # sparse_reduce_sum sets shape to unknown
        col_count.set_shape([sp_tensor.get_shape().as_list()[0]])
        col_count_cs = tf.math.cumsum(col_count)
        row_start_coor = col_count_cs - col_count

        g_col_count = tf.gather(col_count, ids)
        g_row_start_coor = tf.gather(row_start_coor, ids)

        row_start_coor = tf.repeat(g_row_start_coor, g_col_count)
        # col_counts = repeat_each(g_col_count, g_col_count)

        offset = ranges(g_col_count)

        # use modular arithmetic to make sure we get incremental coordinates
        # gather_ids = row_start_coor + offset % col_counts
        gather_ids = row_start_coor + offset

        num_ids = tf.cast(tf.shape(ids)[0], tf.int64)
        new_rows = tf.repeat(tf.range(num_ids), g_col_count)

        sp_cols = sp_tensor.indices[:, -1]
        new_cols = tf.gather(sp_cols, gather_ids)
        new_indices = tf.stack([new_rows, new_cols], axis=-1)
        new_values = tf.gather(sp_tensor.values, gather_ids)

        new_shape = tf.concat([tf.expand_dims(tf.cast(num_ids, tf.int64), -1),
                               sp_tensor.dense_shape[1:]],
                              axis=-1)

        sp = tf.SparseTensor(new_indices, new_values, new_shape)

        return sp
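# A small runnable sketch (hypothetical `_demo_gather_sparse` helper, assumes eager execution):
# gathers rows 1, 1 and 2 of a [3, 4] sparse tensor into a new [3, 4] sparse tensor.
def _demo_gather_sparse():
    sp = tf.SparseTensor(indices=[[0, 0], [1, 1], [1, 2], [2, 0]],
                         values=[1., 2., 3., 4.],
                         dense_shape=[3, 4])
    # -> [[0., 2., 3., 0.], [0., 2., 3., 0.], [4., 0., 0., 0.]]
    return tf.sparse.to_dense(gather_sparse(sp, [1, 1, 2]))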
def grid_2d(shape, name="grid_2d"):
    """ creates a tensor with the coordinates of a 2D grid

    Args:
        shape (`Tensor`): a `Tensor` of `tf.int32` with a 2D shape for the grid
        name (`str`): grid_2d op name

    Returns:
        grid_coordinates (`Tensor`): 2D tensor with grid coordinates

    """
    shape = as_tensor(shape, tf.int32)

    with tf.name_scope(name):
        x = tf.range(shape[0])
        y = tf.range(shape[1])
        x = x[tf.newaxis, :, tf.newaxis]
        y = y[:, tf.newaxis, tf.newaxis]

        return tf.reshape(tf.concat([x + tf.zeros_like(y), tf.zeros_like(x) + y], axis=2), [-1, 2])
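# A tiny runnable sketch (hypothetical `_demo_grid_2d` helper): coordinates of a 2x3 grid, where
# the first coordinate ranges over shape[0] and the second over shape[1].
def _demo_grid_2d():
    # -> [[0, 0], [1, 0], [0, 1], [1, 1], [0, 2], [1, 2]]
    return grid_2d([2, 3])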
def sparse_tile(sp_tensor, num, name="sparse_tile"):
    """ Constructs a `SparseTensor` by replicating the input sparse tensor `num` times

    Args:
        sp_tensor (`SparseTensor`): a sparse input tensor to be tiled
        num (`int`): number of repetitions
        name (`str`): name for the op

    Returns:
        sp_tile (`SparseTensor`): result sparse tensor

    """
    with tf.name_scope(name):
        sp_tensor = as_tensor(sp_tensor)
        values = tf.tile(sp_tensor.values, [num])
        num = as_tensor(num, tf.int64)

        indices = tf.tile(sp_tensor.indices, [num, 1])
        row_indices, col_indices = tf.unstack(indices, num=2, axis=-1)

        # fix row indices
        num_values = tf.shape(sp_tensor.values, out_type=tf.int64)[0]
        batch_size = tf.shape(sp_tensor, out_type=tf.int64)[0]

        # this is preferable to using dense shape directly because we need the num cols to be known
        dim = sp_tensor.dense_shape[-1]

        offset = tf.range(start=0, limit=num * batch_size, delta=batch_size, dtype=tf.int64)
        row_offset = repeat(x=offset, n=num_values)
        row_indices = row_indices + row_offset
        indices = tf.stack([row_indices, col_indices], axis=-1)

        tile_batch_size = batch_size * num
        tiled_dense_shape = tf.stack([tile_batch_size, dim], axis=0)
        sp_tiled = tf.SparseTensor(indices=indices,
                                   values=values,
                                   dense_shape=tiled_dense_shape)

        return sp_tiled
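# A small runnable sketch (hypothetical `_demo_sparse_tile` helper): a [2, 3] sparse tensor is
# stacked 3 times along the batch dimension, giving a dense shape of [6, 3].
def _demo_sparse_tile():
    sp = tf.SparseTensor(indices=[[0, 1], [1, 0]], values=[1., 2.], dense_shape=[2, 3])
    return tf.sparse.to_dense(sparse_tile(sp, 3))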