the-stack_106_31544
import os
import copy
import time
import asyncio
import logging
from datetime import datetime
from functools import partial
from operator import itemgetter
from collections import defaultdict
from binascii import hexlify, unhexlify
from typing import Dict, Tuple, Type, Iterable, List, Optional, DefaultDict, NamedTuple
from lbry.schema.result import Outputs, INVALID, NOT_FOUND
from lbry.schema.url import URL
from lbry.crypto.hash import hash160, double_sha256, sha256
from lbry.crypto.base58 import Base58
from lbry.utils import LRUCacheWithMetrics
from .tasks import TaskGroup
from .database import Database
from .stream import StreamController
from .dewies import dewies_to_lbc
from .account import Account, AddressManager, SingleKey
from .network import Network
from .transaction import Transaction, Output
from .header import Headers, UnvalidatedHeaders
from .checkpoints import HASHES
from .constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
from .bip32 import PubKey, PrivateKey
from .coinselection import CoinSelector
log = logging.getLogger(__name__)
LedgerType = Type['BaseLedger']
class LedgerRegistry(type):
ledgers: Dict[str, LedgerType] = {}
def __new__(mcs, name, bases, attrs):
cls: LedgerType = super().__new__(mcs, name, bases, attrs)
if not (name == 'BaseLedger' and not bases):
ledger_id = cls.get_id()
assert ledger_id not in mcs.ledgers, \
f'Ledger with id "{ledger_id}" already registered.'
mcs.ledgers[ledger_id] = cls
return cls
@classmethod
def get_ledger_class(mcs, ledger_id: str) -> LedgerType:
return mcs.ledgers[ledger_id]
class TransactionEvent(NamedTuple):
address: str
tx: Transaction
class AddressesGeneratedEvent(NamedTuple):
address_manager: AddressManager
addresses: List[str]
class BlockHeightEvent(NamedTuple):
height: int
change: int
class TransactionCacheItem:
__slots__ = '_tx', 'lock', 'has_tx', 'pending_verifications'
def __init__(self, tx: Optional[Transaction] = None, lock: Optional[asyncio.Lock] = None):
self.has_tx = asyncio.Event()
self.lock = lock or asyncio.Lock()
self._tx = self.tx = tx
self.pending_verifications = 0
@property
def tx(self) -> Optional[Transaction]:
return self._tx
@tx.setter
def tx(self, tx: Transaction):
self._tx = tx
if tx is not None:
self.has_tx.set()
class Ledger(metaclass=LedgerRegistry):
name = 'LBRY Credits'
symbol = 'LBC'
network_name = 'mainnet'
headers_class = Headers
secret_prefix = bytes((0x1c,))
pubkey_address_prefix = bytes((0x55,))
script_address_prefix = bytes((0x7a,))
extended_public_key_prefix = unhexlify('0488b21e')
extended_private_key_prefix = unhexlify('0488ade4')
max_target = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
genesis_bits = 0x1f00ffff
target_timespan = 150
default_fee_per_byte = 50
default_fee_per_name_char = 200000
checkpoints = HASHES
def __init__(self, config=None):
self.config = config or {}
self.db: Database = self.config.get('db') or Database(
os.path.join(self.path, "blockchain.db")
)
self.db.ledger = self
self.headers: Headers = self.config.get('headers') or self.headers_class(
os.path.join(self.path, "headers")
)
self.headers.checkpoints = self.checkpoints
self.network: Network = self.config.get('network') or Network(self)
self.network.on_header.listen(self.receive_header)
self.network.on_status.listen(self.process_status_update)
self.accounts = []
self.fee_per_byte: int = self.config.get('fee_per_byte', self.default_fee_per_byte)
self._on_transaction_controller = StreamController()
self.on_transaction = self._on_transaction_controller.stream
self.on_transaction.listen(
lambda e: log.info(
'(%s) on_transaction: address=%s, height=%s, is_verified=%s, tx.id=%s',
self.get_id(), e.address, e.tx.height, e.tx.is_verified, e.tx.id
)
)
self._on_address_controller = StreamController()
self.on_address = self._on_address_controller.stream
self.on_address.listen(
lambda e: log.info('(%s) on_address: %s', self.get_id(), e.addresses)
)
self._on_header_controller = StreamController()
self.on_header = self._on_header_controller.stream
self.on_header.listen(
lambda change: log.info(
'%s: added %s header blocks, final height %s',
self.get_id(), change, self.headers.height
)
)
self._download_height = 0
self._on_ready_controller = StreamController()
self.on_ready = self._on_ready_controller.stream
self._tx_cache = LRUCacheWithMetrics(self.config.get("tx_cache_size", 1024), metric_name='tx')
self._update_tasks = TaskGroup()
self._other_tasks = TaskGroup() # tasks that we don't need to start
self._utxo_reservation_lock = asyncio.Lock()
self._header_processing_lock = asyncio.Lock()
self._address_update_locks: DefaultDict[str, asyncio.Lock] = defaultdict(asyncio.Lock)
self._history_lock = asyncio.Lock()
self.coin_selection_strategy = None
self._known_addresses_out_of_sync = set()
self.fee_per_name_char = self.config.get('fee_per_name_char', self.default_fee_per_name_char)
self._balance_cache = LRUCacheWithMetrics(2 ** 15)
@classmethod
def get_id(cls):
return '{}_{}'.format(cls.symbol.lower(), cls.network_name.lower())
@classmethod
def hash160_to_address(cls, h160):
raw_address = cls.pubkey_address_prefix + h160
return Base58.encode(bytearray(raw_address + double_sha256(raw_address)[0:4]))
@staticmethod
def address_to_hash160(address):
return Base58.decode(address)[1:21]
@classmethod
def is_valid_address(cls, address):
decoded = Base58.decode_check(address)
return decoded[0] == cls.pubkey_address_prefix[0]
@classmethod
def public_key_to_address(cls, public_key):
return cls.hash160_to_address(hash160(public_key))
@staticmethod
def private_key_to_wif(private_key):
return b'\x1c' + private_key + b'\x01'
@property
def path(self):
return os.path.join(self.config['data_path'], self.get_id())
def add_account(self, account: Account):
self.accounts.append(account)
async def _get_account_and_address_info_for_address(self, wallet, address):
match = await self.db.get_address(accounts=wallet.accounts, address=address)
if match:
for account in wallet.accounts:
if match['account'] == account.public_key.address:
return account, match
async def get_private_key_for_address(self, wallet, address) -> Optional[PrivateKey]:
match = await self._get_account_and_address_info_for_address(wallet, address)
if match:
account, address_info = match
return account.get_private_key(address_info['chain'], address_info['pubkey'].n)
return None
async def get_public_key_for_address(self, wallet, address) -> Optional[PubKey]:
match = await self._get_account_and_address_info_for_address(wallet, address)
if match:
_, address_info = match
return address_info['pubkey']
return None
async def get_account_for_address(self, wallet, address):
match = await self._get_account_and_address_info_for_address(wallet, address)
if match:
return match[0]
async def get_effective_amount_estimators(self, funding_accounts: Iterable[Account]):
estimators = []
for account in funding_accounts:
utxos = await account.get_utxos(no_tx=True, no_channel_info=True)
for utxo in utxos:
estimators.append(utxo.get_estimator(self))
return estimators
async def get_addresses(self, **constraints):
return await self.db.get_addresses(**constraints)
def get_address_count(self, **constraints):
return self.db.get_address_count(**constraints)
async def get_spendable_utxos(self, amount: int, funding_accounts: Optional[Iterable['Account']], min_amount=1):
min_amount = min(amount // 10, min_amount)
fee = Output.pay_pubkey_hash(COIN, NULL_HASH32).get_fee(self)
selector = CoinSelector(amount, fee)
async with self._utxo_reservation_lock:
if self.coin_selection_strategy == 'sqlite':
return await self.db.get_spendable_utxos(self, amount + fee, funding_accounts, min_amount=min_amount,
fee_per_byte=self.fee_per_byte)
txos = await self.get_effective_amount_estimators(funding_accounts)
spendables = selector.select(txos, self.coin_selection_strategy)
if spendables:
await self.reserve_outputs(s.txo for s in spendables)
return spendables
def reserve_outputs(self, txos):
return self.db.reserve_outputs(txos)
def release_outputs(self, txos):
return self.db.release_outputs(txos)
def release_tx(self, tx):
return self.release_outputs([txi.txo_ref.txo for txi in tx.inputs])
def get_utxos(self, **constraints):
self.constraint_spending_utxos(constraints)
return self.db.get_utxos(**constraints)
def get_utxo_count(self, **constraints):
self.constraint_spending_utxos(constraints)
return self.db.get_utxo_count(**constraints)
async def get_txos(self, resolve=False, **constraints) -> List[Output]:
txos = await self.db.get_txos(**constraints)
if resolve:
return await self._resolve_for_local_results(constraints.get('accounts', []), txos)
return txos
def get_txo_count(self, **constraints):
return self.db.get_txo_count(**constraints)
def get_txo_sum(self, **constraints):
return self.db.get_txo_sum(**constraints)
def get_txo_plot(self, **constraints):
return self.db.get_txo_plot(**constraints)
def get_transactions(self, **constraints):
return self.db.get_transactions(**constraints)
def get_transaction_count(self, **constraints):
return self.db.get_transaction_count(**constraints)
async def get_local_status_and_history(self, address, history=None):
if not history:
address_details = await self.db.get_address(address=address)
history = (address_details['history'] if address_details else '') or ''
parts = history.split(':')[:-1]
return (
hexlify(sha256(history.encode())).decode() if history else None,
list(zip(parts[0::2], map(int, parts[1::2])))
)
@staticmethod
def get_root_of_merkle_tree(branches, branch_positions, working_branch):
for i, branch in enumerate(branches):
other_branch = unhexlify(branch)[::-1]
other_branch_on_left = bool((branch_positions >> i) & 1)
if other_branch_on_left:
combined = other_branch + working_branch
else:
combined = working_branch + other_branch
working_branch = double_sha256(combined)
return hexlify(working_branch[::-1])
async def start(self):
if not os.path.exists(self.path):
os.mkdir(self.path)
await asyncio.wait([
self.db.open(),
self.headers.open()
])
fully_synced = self.on_ready.first
asyncio.create_task(self.network.start())
await self.network.on_connected.first
async with self._header_processing_lock:
await self._update_tasks.add(self.initial_headers_sync())
self.network.on_connected.listen(self.join_network)
asyncio.ensure_future(self.join_network())
await fully_synced
await self.db.release_all_outputs()
await asyncio.gather(*(a.maybe_migrate_certificates() for a in self.accounts))
await asyncio.gather(*(a.save_max_gap() for a in self.accounts))
if len(self.accounts) > 10:
log.info("Loaded %i accounts", len(self.accounts))
else:
await self._report_state()
self.on_transaction.listen(self._reset_balance_cache)
async def join_network(self, *_):
log.info("Subscribing and updating accounts.")
await self._update_tasks.add(self.subscribe_accounts())
await self._update_tasks.done.wait()
self._on_ready_controller.add(True)
async def stop(self):
self._update_tasks.cancel()
self._other_tasks.cancel()
await self._update_tasks.done.wait()
await self._other_tasks.done.wait()
await self.network.stop()
await self.db.close()
await self.headers.close()
@property
def local_height_including_downloaded_height(self):
return max(self.headers.height, self._download_height)
async def initial_headers_sync(self):
get_chunk = partial(self.network.retriable_call, self.network.get_headers, count=1000, b64=True)
self.headers.chunk_getter = get_chunk
async def doit():
for height in reversed(sorted(self.headers.known_missing_checkpointed_chunks)):
async with self._header_processing_lock:
await self.headers.ensure_chunk_at(height)
self._other_tasks.add(doit())
await self.update_headers()
async def update_headers(self, height=None, headers=None, subscription_update=False):
rewound = 0
while True:
if height is None or height > len(self.headers):
# sometimes header subscription updates are for a header in the future
# which can't be connected, so we do a normal header sync instead
height = len(self.headers)
headers = None
subscription_update = False
if not headers:
header_response = await self.network.retriable_call(self.network.get_headers, height, 2001)
headers = header_response['hex']
if not headers:
# Nothing to do, network thinks we're already at the latest height.
return
added = await self.headers.connect(height, unhexlify(headers))
if added > 0:
height += added
self._on_header_controller.add(
BlockHeightEvent(self.headers.height, added))
if rewound > 0:
# we started rewinding blocks and apparently found
# a new chain
rewound = 0
await self.db.rewind_blockchain(height)
if subscription_update:
# subscription updates are for latest header already
# so we don't need to check if there are newer / more
# on another loop of update_headers(), just return instead
return
elif added == 0:
# we had headers to connect but none got connected, probably a reorganization
height -= 1
rewound += 1
log.warning(
"Blockchain Reorganization: attempting rewind to height %s from starting height %s",
height, height+rewound
)
self._tx_cache.clear()
else:
raise IndexError(f"headers.connect() returned negative number ({added})")
if height < 0:
raise IndexError(
"Blockchain reorganization rewound all the way back to genesis hash. "
"Something is very wrong. Maybe you are on the wrong blockchain?"
)
if rewound >= 100:
raise IndexError(
"Blockchain reorganization dropped {} headers. This is highly unusual. "
"Will not continue to attempt reorganizing. Please, delete the ledger "
"synchronization directory inside your wallet directory (folder: '{}') and "
"restart the program to synchronize from scratch."
.format(rewound, self.get_id())
)
headers = None # ready to download some more headers
# if we made it this far and this was a subscription_update
# it means something went wrong and now we're doing a more
# robust sync, turn off subscription update shortcut
subscription_update = False
async def receive_header(self, response):
async with self._header_processing_lock:
header = response[0]
await self.update_headers(
height=header['height'], headers=header['hex'], subscription_update=True
)
async def subscribe_accounts(self):
if self.network.is_connected and self.accounts:
log.info("Subscribe to %i accounts", len(self.accounts))
await asyncio.wait([
self.subscribe_account(a) for a in self.accounts
])
async def subscribe_account(self, account: Account):
for address_manager in account.address_managers.values():
await self.subscribe_addresses(address_manager, await address_manager.get_addresses())
await account.ensure_address_gap()
async def unsubscribe_account(self, account: Account):
for address in await account.get_addresses():
await self.network.unsubscribe_address(address)
async def announce_addresses(self, address_manager: AddressManager, addresses: List[str]):
await self.subscribe_addresses(address_manager, addresses)
await self._on_address_controller.add(
AddressesGeneratedEvent(address_manager, addresses)
)
async def subscribe_addresses(self, address_manager: AddressManager, addresses: List[str], batch_size: int = 1000):
if self.network.is_connected and addresses:
addresses_remaining = list(addresses)
while addresses_remaining:
batch = addresses_remaining[:batch_size]
results = await self.network.subscribe_address(*batch)
for address, remote_status in zip(batch, results):
self._update_tasks.add(self.update_history(address, remote_status, address_manager))
addresses_remaining = addresses_remaining[batch_size:]
if self.network.client and self.network.client.server_address_and_port:
log.info("subscribed to %i/%i addresses on %s:%i", len(addresses) - len(addresses_remaining),
len(addresses), *self.network.client.server_address_and_port)
if self.network.client and self.network.client.server_address_and_port:
log.info(
"finished subscribing to %i addresses on %s:%i", len(addresses),
*self.network.client.server_address_and_port
)
def process_status_update(self, update):
address, remote_status = update
self._update_tasks.add(self.update_history(address, remote_status))
async def update_history(self, address, remote_status, address_manager: AddressManager = None,
reattempt_update: bool = True):
async with self._address_update_locks[address]:
self._known_addresses_out_of_sync.discard(address)
local_status, local_history = await self.get_local_status_and_history(address)
if local_status == remote_status:
return True
remote_history = await self.network.retriable_call(self.network.get_history, address)
remote_history = list(map(itemgetter('tx_hash', 'height'), remote_history))
we_need = set(remote_history) - set(local_history)
if not we_need:
remote_missing = set(local_history) - set(remote_history)
if remote_missing:
log.warning(
"%i transactions we have for %s are not in the remote address history",
len(remote_missing), address
)
return True
to_request = {}
pending_synced_history = {}
already_synced = set()
already_synced_offset = 0
for i, (txid, remote_height) in enumerate(remote_history):
if i == already_synced_offset and i < len(local_history) and local_history[i] == (txid, remote_height):
pending_synced_history[i] = f'{txid}:{remote_height}:'
already_synced.add((txid, remote_height))
already_synced_offset += 1
continue
tx_indexes = {}
for i, (txid, remote_height) in enumerate(remote_history):
tx_indexes[txid] = i
if (txid, remote_height) in already_synced:
continue
to_request[i] = (txid, remote_height)
log.debug(
"request %i transactions, %i/%i for %s are already synced", len(to_request), len(already_synced),
len(remote_history), address
)
remote_history_txids = set(txid for txid, _ in remote_history)
async for tx in self.request_synced_transactions(to_request, remote_history_txids, address):
pending_synced_history[tx_indexes[tx.id]] = f"{tx.id}:{tx.height}:"
if len(pending_synced_history) % 100 == 0:
log.info("Syncing address %s: %d/%d", address, len(pending_synced_history), len(to_request))
log.info("Sync finished for address %s: %d/%d", address, len(pending_synced_history), len(to_request))
assert len(pending_synced_history) == len(remote_history), \
f"{len(pending_synced_history)} vs {len(remote_history)}"
synced_history = ""
for remote_i, i in zip(range(len(remote_history)), sorted(pending_synced_history.keys())):
assert i == remote_i, f"{i} vs {remote_i}"
txid, height = remote_history[remote_i]
if f"{txid}:{height}:" != pending_synced_history[i]:
log.warning("history mismatch: %s vs %s", remote_history[remote_i], pending_synced_history[i])
synced_history += pending_synced_history[i]
await self.db.set_address_history(address, synced_history)
if address_manager is None:
address_manager = await self.get_address_manager_for_address(address)
if address_manager is not None:
await address_manager.ensure_address_gap()
local_status, local_history = \
await self.get_local_status_and_history(address, synced_history)
if local_status != remote_status:
if local_history == remote_history:
log.warning(
"%s has a synced history but a mismatched status", address
)
return True
remote_set = set(remote_history)
local_set = set(local_history)
log.warning(
"%s is out of sync after syncing.\n"
"Remote: %s with %d items (%i unique), local: %s with %d items (%i unique).\n"
"Histories are mismatched on %i items.\n"
"Local is missing\n"
"%s\n"
"Remote is missing\n"
"%s\n"
"******",
address, remote_status, len(remote_history), len(remote_set),
local_status, len(local_history), len(local_set), len(remote_set.symmetric_difference(local_set)),
"\n".join([f"{txid} - {height}" for txid, height in local_set.difference(remote_set)]),
"\n".join([f"{txid} - {height}" for txid, height in remote_set.difference(local_set)])
)
self._known_addresses_out_of_sync.add(address)
return False
else:
log.debug("finished syncing transaction history for %s, %i known txs", address, len(local_history))
return True
async def maybe_verify_transaction(self, tx, remote_height, merkle=None):
tx.height = remote_height
if 0 < remote_height < len(self.headers):
# can't be tx.pending_verifications == 1 because we have to handle the transaction_show case
if not merkle:
merkle = await self.network.retriable_call(self.network.get_merkle, tx.id, remote_height)
if 'merkle' not in merkle:
return
merkle_root = self.get_root_of_merkle_tree(merkle['merkle'], merkle['pos'], tx.hash)
header = await self.headers.get(remote_height)
tx.position = merkle['pos']
tx.is_verified = merkle_root == header['merkle_root']
return tx
async def request_transactions(self, to_request: Tuple[Tuple[str, int], ...], cached=False):
batches = [[]]
remote_heights = {}
cache_hits = set()
for txid, height in sorted(to_request, key=lambda x: x[1]):
if cached:
cached_tx = self._tx_cache.get(txid)
if cached_tx is not None:
if cached_tx.tx is not None and cached_tx.tx.is_verified:
cache_hits.add(txid)
continue
else:
self._tx_cache[txid] = TransactionCacheItem()
remote_heights[txid] = height
if len(batches[-1]) == 100:
batches.append([])
batches[-1].append(txid)
if not batches[-1]:
batches.pop()
if cached and cache_hits:
yield {txid: self._tx_cache[txid].tx for txid in cache_hits}
for batch in batches:
txs = await self._single_batch(batch, remote_heights)
if cached:
for txid, tx in txs.items():
self._tx_cache[txid].tx = tx
yield txs
async def request_synced_transactions(self, to_request, remote_history, address):
async for txs in self.request_transactions(((txid, height) for txid, height in to_request.values())):
for tx in txs.values():
yield tx
await self._sync_and_save_batch(address, remote_history, txs)
async def _single_batch(self, batch, remote_heights):
heights = {remote_heights[txid] for txid in batch}
unrestricted = 0 < min(heights) < max(heights) < max(self.headers.checkpoints or [0])
batch_result = await self.network.retriable_call(self.network.get_transaction_batch, batch, not unrestricted)
txs = {}
for txid, (raw, merkle) in batch_result.items():
remote_height = remote_heights[txid]
tx = Transaction(unhexlify(raw), height=remote_height)
txs[tx.id] = tx
await self.maybe_verify_transaction(tx, remote_height, merkle)
return txs
async def _sync_and_save_batch(self, address, remote_history, pending_txs):
await asyncio.gather(*(self._sync(tx, remote_history, pending_txs) for tx in pending_txs.values()))
await self.db.save_transaction_io_batch(
pending_txs.values(), address, self.address_to_hash160(address), ""
)
while pending_txs:
self._on_transaction_controller.add(TransactionEvent(address, pending_txs.popitem()[1]))
async def _sync(self, tx, remote_history, pending_txs):
check_db_for_txos = {}
for txi in tx.inputs:
if txi.txo_ref.txo is not None:
continue
wanted_txid = txi.txo_ref.tx_ref.id
if wanted_txid not in remote_history:
continue
if wanted_txid in pending_txs:
txi.txo_ref = pending_txs[wanted_txid].outputs[txi.txo_ref.position].ref
else:
check_db_for_txos[txi] = txi.txo_ref.id
referenced_txos = {} if not check_db_for_txos else {
txo.id: txo for txo in await self.db.get_txos(
txoid__in=list(check_db_for_txos.values()), order_by='txo.txoid', no_tx=True
)
}
for txi in check_db_for_txos:
if txi.txo_ref.id in referenced_txos:
txi.txo_ref = referenced_txos[txi.txo_ref.id].ref
else:
tx_from_db = await self.db.get_transaction(txid=txi.txo_ref.tx_ref.id)
if tx_from_db is None:
log.warning("%s not on db, not on cache, but on remote history!", txi.txo_ref.id)
else:
txi.txo_ref = tx_from_db.outputs[txi.txo_ref.position].ref
return tx
async def get_address_manager_for_address(self, address) -> Optional[AddressManager]:
details = await self.db.get_address(address=address)
for account in self.accounts:
if account.id == details['account']:
return account.address_managers[details['chain']]
return None
def broadcast(self, tx):
# broadcast can't be a retriable call yet
return self.network.broadcast(hexlify(tx.raw).decode())
async def wait(self, tx: Transaction, height=-1, timeout=1):
timeout = timeout or 600 # after 10 minutes there is almost 0 hope
addresses = set()
for txi in tx.inputs:
if txi.txo_ref.txo is not None:
addresses.add(
self.hash160_to_address(txi.txo_ref.txo.pubkey_hash)
)
for txo in tx.outputs:
if txo.has_address:
addresses.add(self.hash160_to_address(txo.pubkey_hash))
start = int(time.perf_counter())
while timeout and (int(time.perf_counter()) - start) <= timeout:
if await self._wait_round(tx, height, addresses):
return
raise asyncio.TimeoutError('Timed out waiting for transaction.')
async def _wait_round(self, tx: Transaction, height: int, addresses: Iterable[str]):
records = await self.db.get_addresses(address__in=addresses)
_, pending = await asyncio.wait([
self.on_transaction.where(partial(
lambda a, e: a == e.address and e.tx.height >= height and e.tx.id == tx.id,
address_record['address']
)) for address_record in records
], timeout=1)
if not pending:
return True
records = await self.db.get_addresses(address__in=addresses)
for record in records:
local_history = (await self.get_local_status_and_history(
record['address'], history=record['history']
))[1] if record['history'] else []
for txid, local_height in local_history:
if txid == tx.id:
if local_height >= height:
return True
log.warning(
"local history has higher height than remote for %s (%i vs %i)", txid,
local_height, height
)
return False
log.warning(
"local history does not contain %s, requested height %i", tx.id, height
)
return False
async def _inflate_outputs(
self, query, accounts,
include_purchase_receipt=False,
include_is_my_output=False,
include_sent_supports=False,
include_sent_tips=False,
include_received_tips=False) -> Tuple[List[Output], dict, int, int]:
encoded_outputs = await query
outputs = Outputs.from_base64(encoded_outputs or b'') # TODO: why is the server returning None?
txs: List[Transaction] = []
if len(outputs.txs) > 0:
async for tx in self.request_transactions(tuple(outputs.txs), cached=True):
txs.extend(tx.values())
_txos, blocked = outputs.inflate(txs)
txos = []
for txo in _txos:
if isinstance(txo, Output):
# transactions and outputs are cached and shared between wallets
# we don't want to leak information between wallets so we add the
# wallet-specific metadata on throw-away copies of the txos
txo = copy.copy(txo)
channel = txo.channel
txo.purchase_receipt = None
txo.update_annotations(None)
txo.channel = channel
txos.append(txo)
includes = (
include_purchase_receipt, include_is_my_output,
include_sent_supports, include_sent_tips
)
if accounts and any(includes):
receipts = {}
if include_purchase_receipt:
priced_claims = []
for txo in txos:
if isinstance(txo, Output) and txo.has_price:
priced_claims.append(txo)
if priced_claims:
receipts = {
txo.purchased_claim_id: txo for txo in
await self.db.get_purchases(
accounts=accounts,
purchased_claim_id__in=[c.claim_id for c in priced_claims]
)
}
for txo in txos:
if isinstance(txo, Output) and txo.can_decode_claim:
if include_purchase_receipt:
txo.purchase_receipt = receipts.get(txo.claim_id)
if include_is_my_output:
mine = await self.db.get_txo_count(
claim_id=txo.claim_id, txo_type__in=CLAIM_TYPES, is_my_output=True,
is_spent=False, accounts=accounts
)
if mine:
txo.is_my_output = True
else:
txo.is_my_output = False
if include_sent_supports:
supports = await self.db.get_txo_sum(
claim_id=txo.claim_id, txo_type=TXO_TYPES['support'],
is_my_input=True, is_my_output=True,
is_spent=False, accounts=accounts
)
txo.sent_supports = supports
if include_sent_tips:
tips = await self.db.get_txo_sum(
claim_id=txo.claim_id, txo_type=TXO_TYPES['support'],
is_my_input=True, is_my_output=False,
accounts=accounts
)
txo.sent_tips = tips
if include_received_tips:
tips = await self.db.get_txo_sum(
claim_id=txo.claim_id, txo_type=TXO_TYPES['support'],
is_my_input=False, is_my_output=True,
accounts=accounts
)
txo.received_tips = tips
return txos, blocked, outputs.offset, outputs.total
async def resolve(self, accounts, urls, new_sdk_server=None, **kwargs):
txos = []
urls_copy = list(urls)
if new_sdk_server:
resolve = partial(self.network.new_resolve, new_sdk_server)
else:
resolve = partial(self.network.retriable_call, self.network.resolve)
while urls_copy:
batch, urls_copy = urls_copy[:100], urls_copy[100:]
txos.extend(
(await self._inflate_outputs(
resolve(batch), accounts, **kwargs
))[0]
)
assert len(urls) == len(txos), "Mismatch between urls requested for resolve and responses received."
result = {}
for url, txo in zip(urls, txos):
if txo:
if isinstance(txo, Output) and URL.parse(url).has_stream_in_channel:
if not txo.channel or not txo.is_signed_by(txo.channel, self):
txo = {'error': {'name': INVALID, 'text': f'{url} has invalid channel signature'}}
else:
txo = {'error': {'name': NOT_FOUND, 'text': f'{url} did not resolve to a claim'}}
result[url] = txo
return result
async def sum_supports(self, new_sdk_server, **kwargs) -> List[Dict]:
return await self.network.sum_supports(new_sdk_server, **kwargs)
async def claim_search(
self, accounts, include_purchase_receipt=False, include_is_my_output=False,
new_sdk_server=None, **kwargs) -> Tuple[List[Output], dict, int, int]:
if new_sdk_server:
claim_search = partial(self.network.new_claim_search, new_sdk_server)
else:
claim_search = self.network.claim_search
return await self._inflate_outputs(
claim_search(**kwargs), accounts,
include_purchase_receipt=include_purchase_receipt,
include_is_my_output=include_is_my_output,
)
async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
for claim in (await self.claim_search(accounts, claim_id=claim_id, **kwargs))[0]:
return claim
async def _report_state(self):
try:
for account in self.accounts:
balance = dewies_to_lbc(await account.get_balance(include_claims=True))
channel_count = await account.get_channel_count()
claim_count = await account.get_claim_count()
if isinstance(account.receiving, SingleKey):
log.info("Loaded single key account %s with %s LBC. "
"%d channels, %d certificates and %d claims",
account.id, balance, channel_count, len(account.channel_keys), claim_count)
else:
total_receiving = len(await account.receiving.get_addresses())
total_change = len(await account.change.get_addresses())
log.info("Loaded account %s with %s LBC, %d receiving addresses (gap: %d), "
"%d change addresses (gap: %d), %d channels, %d certificates and %d claims. ",
account.id, balance, total_receiving, account.receiving.gap, total_change,
account.change.gap, channel_count, len(account.channel_keys), claim_count)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception(
'Failed to display wallet state, please file issue '
'for this bug along with the traceback you see below:')
async def _reset_balance_cache(self, e: TransactionEvent):
account_ids = [
r['account'] for r in await self.db.get_addresses(('account',), address=e.address)
]
for account_id in account_ids:
if account_id in self._balance_cache:
del self._balance_cache[account_id]
@staticmethod
def constraint_spending_utxos(constraints):
constraints['txo_type__in'] = (0, TXO_TYPES['purchase'])
async def get_purchases(self, resolve=False, **constraints):
purchases = await self.db.get_purchases(**constraints)
if resolve:
claim_ids = [p.purchased_claim_id for p in purchases]
try:
resolved, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception("Resolve failed while looking up purchased claim ids:")
resolved = []
lookup = {claim.claim_id: claim for claim in resolved}
for purchase in purchases:
purchase.purchased_claim = lookup.get(purchase.purchased_claim_id)
return purchases
def get_purchase_count(self, resolve=False, **constraints):
return self.db.get_purchase_count(**constraints)
async def _resolve_for_local_results(self, accounts, txos):
txos = await self._resolve_for_local_claim_results(accounts, txos)
txos = await self._resolve_for_local_support_results(accounts, txos)
return txos
async def _resolve_for_local_claim_results(self, accounts, txos):
results = []
response = await self.resolve(
accounts, [txo.permanent_url for txo in txos if txo.can_decode_claim]
)
for txo in txos:
resolved = response.get(txo.permanent_url) if txo.can_decode_claim else None
if isinstance(resolved, Output):
resolved.update_annotations(txo)
results.append(resolved)
else:
if isinstance(resolved, dict) and 'error' in resolved:
txo.meta['error'] = resolved['error']
results.append(txo)
return results
async def _resolve_for_local_support_results(self, accounts, txos):
channel_ids = set()
signed_support_txos = []
for txo in txos:
support = txo.can_decode_support
if support and support.signing_channel_id:
channel_ids.add(support.signing_channel_id)
signed_support_txos.append(txo)
if channel_ids:
channels = {
channel.claim_id: channel for channel in
(await self.claim_search(accounts, claim_ids=list(channel_ids)))[0]
}
for txo in signed_support_txos:
txo.channel = channels.get(txo.support.signing_channel_id)
return txos
async def get_claims(self, resolve=False, **constraints):
claims = await self.db.get_claims(**constraints)
if resolve:
return await self._resolve_for_local_results(constraints.get('accounts', []), claims)
return claims
def get_claim_count(self, **constraints):
return self.db.get_claim_count(**constraints)
async def get_streams(self, resolve=False, **constraints):
streams = await self.db.get_streams(**constraints)
if resolve:
return await self._resolve_for_local_results(constraints.get('accounts', []), streams)
return streams
def get_stream_count(self, **constraints):
return self.db.get_stream_count(**constraints)
async def get_channels(self, resolve=False, **constraints):
channels = await self.db.get_channels(**constraints)
if resolve:
return await self._resolve_for_local_results(constraints.get('accounts', []), channels)
return channels
def get_channel_count(self, **constraints):
return self.db.get_channel_count(**constraints)
async def resolve_collection(self, collection, offset=0, page_size=1):
claim_ids = collection.claim.collection.claims.ids[offset:page_size + offset]
try:
resolve_results, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception("Resolve failed while looking up collection claim ids:")
return []
claims = []
for claim_id in claim_ids:
found = False
for txo in resolve_results:
if txo.claim_id == claim_id:
claims.append(txo)
found = True
break
if not found:
claims.append(None)
return claims
async def get_collections(self, resolve_claims=0, resolve=False, **constraints):
collections = await self.db.get_collections(**constraints)
if resolve:
collections = await self._resolve_for_local_results(constraints.get('accounts', []), collections)
if resolve_claims > 0:
for collection in collections:
collection.claims = await self.resolve_collection(collection, page_size=resolve_claims)
return collections
def get_collection_count(self, resolve_claims=0, **constraints):
return self.db.get_collection_count(**constraints)
def get_supports(self, **constraints):
return self.db.get_supports(**constraints)
def get_support_count(self, **constraints):
return self.db.get_support_count(**constraints)
async def get_transaction_history(self, read_only=False, **constraints):
txs: List[Transaction] = await self.db.get_transactions(
include_is_my_output=True, include_is_spent=True,
read_only=read_only, **constraints
)
headers = self.headers
history = []
for tx in txs: # pylint: disable=too-many-nested-blocks
ts = headers.estimated_timestamp(tx.height)
item = {
'txid': tx.id,
'timestamp': ts,
'date': datetime.fromtimestamp(ts).isoformat(' ')[:-3] if tx.height > 0 else None,
'confirmations': (headers.height + 1) - tx.height if tx.height > 0 else 0,
'claim_info': [],
'update_info': [],
'support_info': [],
'abandon_info': [],
'purchase_info': []
}
is_my_inputs = all([txi.is_my_input for txi in tx.inputs])
if is_my_inputs:
# fees only matter if we are the ones paying them
item['value'] = dewies_to_lbc(tx.net_account_balance + tx.fee)
item['fee'] = dewies_to_lbc(-tx.fee)
else:
# someone else paid the fees
item['value'] = dewies_to_lbc(tx.net_account_balance)
item['fee'] = '0.0'
for txo in tx.my_claim_outputs:
item['claim_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(-txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'nout': txo.position,
'is_spent': txo.is_spent,
})
for txo in tx.my_update_outputs:
if is_my_inputs: # updating my own claim
previous = None
for txi in tx.inputs:
if txi.txo_ref.txo is not None:
other_txo = txi.txo_ref.txo
if (other_txo.is_claim or other_txo.script.is_support_claim) \
and other_txo.claim_id == txo.claim_id:
previous = other_txo
break
if previous is not None:
item['update_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(previous.amount - txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'nout': txo.position,
'is_spent': txo.is_spent,
})
else: # someone sent us their claim
item['update_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(0),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'nout': txo.position,
'is_spent': txo.is_spent,
})
for txo in tx.my_support_outputs:
item['support_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(txo.amount if not is_my_inputs else -txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'is_tip': not is_my_inputs,
'nout': txo.position,
'is_spent': txo.is_spent,
})
if is_my_inputs:
for txo in tx.other_support_outputs:
item['support_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(-txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'is_tip': is_my_inputs,
'nout': txo.position,
'is_spent': txo.is_spent,
})
for txo in tx.my_abandon_outputs:
item['abandon_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'nout': txo.position
})
for txo in tx.any_purchase_outputs:
item['purchase_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(txo.amount if not is_my_inputs else -txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.purchased_claim_id,
'nout': txo.position,
'is_spent': txo.is_spent,
})
history.append(item)
return history
def get_transaction_history_count(self, read_only=False, **constraints):
return self.db.get_transaction_count(read_only=read_only, **constraints)
async def get_detailed_balance(self, accounts, confirmations=0):
result = {
'total': 0,
'available': 0,
'reserved': 0,
'reserved_subtotals': {
'claims': 0,
'supports': 0,
'tips': 0
}
}
for account in accounts:
balance = self._balance_cache.get(account.id)
if not balance:
balance = self._balance_cache[account.id] = \
await account.get_detailed_balance(confirmations, reserved_subtotals=True)
for key, value in balance.items():
if key == 'reserved_subtotals':
for subkey, subvalue in value.items():
result['reserved_subtotals'][subkey] += subvalue
else:
result[key] += value
return result
class TestNetLedger(Ledger):
network_name = 'testnet'
pubkey_address_prefix = bytes((111,))
script_address_prefix = bytes((196,))
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
checkpoints = {}
class RegTestLedger(Ledger):
network_name = 'regtest'
headers_class = UnvalidatedHeaders
pubkey_address_prefix = bytes((111,))
script_address_prefix = bytes((196,))
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
max_target = 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
genesis_bits = 0x207fffff
target_timespan = 1
checkpoints = {}
the-stack_106_31545
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import datetime
def get_combined_image(black_image, red_image):
result = np.full((black_image.height, black_image.width, 3), 255, 'uint8')
black = np.array(black_image)
red = np.array(red_image)
blacks = (black == 0)
reds = (red == 0)
result[...,0:3][reds] = (255,0,0)
result[...,0:3][blacks] = (0,0,0)
image = Image.fromarray(result, mode="RGB")
return image
class MockScreen:
def __init__(self, png_url, add_date=False):
self.png_url = png_url
self.add_date = add_date
def get_update_rate(self, cycle):
return 5
def update(self, display, cycle):
if self.add_date:
url = self.png_url % datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
else:
url = self.png_url
image = get_combined_image(display.black_image, display.red_image)
image.save(url, "PNG")
def clear(self, display):
display.clear()
self.update(display, None)
the-stack_106_31547
from Entity import *
from collections import deque
class Explosion(Entity):
pool = deque()
@staticmethod
def spawn(anim, x, y):
asset = Explosion() if len(Explosion.pool) == 0 or Explosion.pool[0].active else Explosion.pool.popleft()
asset.anim = anim
asset.x = x
asset.y = y
asset.looped = False
asset.active = True
Explosion.pool.append(asset)
Global.entities.append(asset)
def update(self):
super().update()
if self.looped: self.active = False # remove when the animation completes
the-stack_106_31548
"""Tests of theoretical results."""
# pylint: disable=redefined-outer-name,cyclic-import
import pytest
import numpy as np
from pathcensus import PathCensus
from tests.utils import get_largest_component
@pytest.fixture(scope="session")
def random_graph_connected(random_graph):
G, _ = random_graph
G = get_largest_component(G)
P = PathCensus(G)
return G, P
class TestTheory:
"""Test various theoretical results concerning
local similarity and complementarity coefficients.
"""
@staticmethod
def weighted_average(x, w):
m = np.isnan(x)
if m.all():
return 0
x = x[~m]
w = w[~m]
return (x * w).sum() / w.sum()
def test_similarity_node_edge_sum(self, random_graph_connected):
"""Test whether node similarity is a weighted average
of corresponding edge similarities.
"""
_, P = random_graph_connected
edge = P.simcoefs("edges", census=True, undefined="nan") \
.groupby(level="i") \
.apply(lambda df: \
self.weighted_average(df["sim"], df["tw"] + df["th"])
)
node = P.similarity("nodes", undefined="zero")
assert np.allclose(edge, node)
def test_similarity_node_edge_minmax_bounds(self, random_graph_connected):
"""Test whether node similarity is bounded between
minimum and maximum edge similarity.
"""
_, P = random_graph_connected
gdf = P.similarity("edges").groupby(level="i").agg([min, max])
s_node = P.similarity("nodes", undefined="undefined")
s_emin = gdf["min"]
s_emax = gdf["max"]
assert s_node.between(s_emin, s_emax).all()
def test_complementarity_node_edge_sum(self, random_graph_connected):
"""Test whether node complementarity is a weighted average
of corresponding edge complementarity coefficients.
"""
_, P = random_graph_connected
edge = P.compcoefs("edges", census=True, undefined="nan") \
.groupby(level="i") \
.apply(lambda df: \
self.weighted_average(df["comp"], df["qw"] + df["qh"])
)
node = P.complementarity("nodes", undefined="zero")
assert np.allclose(edge, node)
def test_complementarity_node_edge_minmax_bounds(self, random_graph_connected):
"""Test whether node complementarity is bounded between
minimum and maximum edge complementarity.
"""
_, P = random_graph_connected
gdf = P.complementarity("edges").groupby(level="i").agg([min, max])
c_node = P.complementarity("nodes", undefined="zero")
c_emin = gdf["min"]
c_emax = gdf["max"]
assert c_node.between(c_emin, c_emax).all()
the-stack_106_31549
# Link: https://leetcode.com/problems/longest-substring-without-repeating-characters/
"""
Problem statement:
Given a string s, find the length of the longest substring without repeating characters.
Example 1:
Input: s = "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
Solution:
Approach ==> Optimized Sliding Window
Working:
1. An empty dictionary (also called HashMap) is initialized, which will hold the visited characters of the string
as keys and their (current index + 1) as values.
For example, {key: value} <---> {'a': 1}
2. Two pointers i and j will handle the range of substrings which need to be traversed.
3. Initially, pointers i and j hold the starting index of the string.
4. j is incremented till a character is repeated.
5. When a repeated character is found, i is moved to the stored index, which defines the new range of substrings that
will be traversed from then on.
6. The length of the current substring (without repetitions) is compared with the previous max_length and updated if
it is greater.
7. This process from point 4 loops till all the characters are traversed.
"""
class Solution(object):
def longestSubstringWithoutRepeatingCharacters(self, s):
"""
:type s: str
:rtype: int
"""
max_length = 0
# visited: Dictionary to store the visited characters as keys and their (current index + 1) as values.
visited = {}
i = 0
for j in range(len(s)):
# Check if a character is repeated.
if s[j] in visited.keys():
# Update the range.
i = max(visited[s[j]], i)
# Compare the length of new substring (j - i + 1) with the old length and update it if greater than previous length.
max_length = max(max_length, j - i + 1)
# (current index + 1) for visited characters.
visited[s[j]] = j + 1
return max_length
# Time Complexity: O(n)
# Using Set :
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
# Sliding window method
left_pointer = 0
res = 0
char_set = set()
for right_pointer in range(len(s)):
while(s[right_pointer] in char_set):
char_set.remove(s[left_pointer])
left_pointer += 1
char_set.add(s[right_pointer])
res = max(res , right_pointer - left_pointer + 1)
return res
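# A brief usage sketch (added for illustration, not part of the original solution file).
# Because the second `Solution` class above shadows the first, this exercises the
# set-based sliding window on the examples from the problem statement.
print(Solution().lengthOfLongestSubstring("abcabcbb"))  # 3 -> "abc"
print(Solution().lengthOfLongestSubstring("bbbbb"))     # 1 -> "b"
print(Solution().lengthOfLongestSubstring("pwwkew"))    # 3 -> "wke"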
the-stack_106_31551
"""
.B.lender .V.ision .P.roject file operation
Gets properties for all skies in a .blend file. Stores properties in a list
of dictionaries (one dict for each sky (group) in the file), and saves that
list in a pickle (.pik) file with the same name as the .blend file.
These .pik files are loaded by the bvpLibrary class.
Critical to the sky
dictionaries are of the form:
{
'fname':'/path/to/Category_Blah.blend',
'name':'Sky_001_Whatever',
'semantic_category':['cloudy','day']
'real_world_size':100.000, # size of whole space in meters
'nVertices':1000,
'nFaces':900,
}
ML 2012.02
"""
# Imports
import bpy,bvp,os,re
import math as bnp
from bvp.utils.basics import savePik
from bvp.utils.blender import GetConstr
d = []
fName = os.path.split(bpy.data.filepath)[-1]
BaseCat = re.search('(?<=Category_)[A-Z,a-z,0-9]*',fName).group()
for G in bpy.data.groups:
try:
try:
semCat = G.objects[0]['semantic_category'].split(',')
except:
semCat = [BaseCat]
# Add file title category to list of categories, if not present:
if not semCat[0].lower()==BaseCat.lower():
semCat = [BaseCat.lower()]+semCat
except:
semCat = [BaseCat]
try:
rws = G.objects[0]['RealWorldSize'] # of the whole space
except:
rws = 100.
# Light locations and rotations
try:
LightLoc = [list(L.location) for L in G.objects if L.type=='LAMP']
LightRot = [[bnp.degrees(x) for x in L.rotation_euler] for L in G.objects if L.type=='LAMP']
LightType = [L.data.type for L in G.objects if L.type=='LAMP']
except:
raise Exception("Why aren't there any lamps in sky %s??"%G.name)
d.append(dict(
fname=bpy.data.filepath,
name=G.name,
semantic_category=semCat,
real_world_size=rws,
lightLoc=LightLoc,
lightRot=LightRot,
lightType=LightType,
nVertices=sum([len(oo.data.vertices) for oo in G.objects if oo.type=='MESH']),
nFaces=sum([len(oo.data.faces) for oo in G.objects if oo.type=='MESH']),
))
sName = bpy.data.filepath.replace('.blend','.pik')
savePik(d,sName)
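# Optional read-back check (an illustrative sketch, not part of the original script):
# assuming savePik writes an ordinary pickle, the sky dicts can be reloaded and
# spot-checked with the standard library.
import pickle
skies = pickle.load(open(sName, 'rb'))
print('%d sky group(s) saved to %s' % (len(skies), sName))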
the-stack_106_31552
"""
$url lrt.lt
$type live
"""
import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.stream.hls import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(?:www\.)?lrt\.lt/mediateka/tiesiogiai/"
))
class LRT(Plugin):
_video_id_re = re.compile(r"""var\svideo_id\s*=\s*["'](?P<video_id>\w+)["']""")
API_URL = "https://www.lrt.lt/servisai/stream_url/live/get_live_url.php?channel={0}"
def _get_streams(self):
page = self.session.http.get(self.url)
m = self._video_id_re.search(page.text)
if m:
video_id = m.group("video_id")
data = self.session.http.get(self.API_URL.format(video_id)).json()
hls_url = data["response"]["data"]["content"]
yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()
else:
log.debug("No match for video_id regex")
__plugin__ = LRT
the-stack_106_31553
# ------------------------------------------
# --- Author: Bing
# --- Version: 1.0
# --- Description: This python script will update AWS Thing Shadow for a Device/Thing
# ------------------------------------------
# Import package
import paho.mqtt.client as mqtt
import ssl, time, sys
# =======================================================
# Set Following Variables
# AWS IoT Endpoint
MQTT_HOST = "a2xmpbgswmier.iot.us-west-2.amazonaws.com"
# CA Root Certificate File Path
CA_ROOT_CERT_FILE = "/home/pi/Documents/Amazon/rootCA.pem.crt"
# AWS IoT Thing Name
THING_NAME = "MyRaspberryPi"
# AWS IoT Thing Certificate File Path
THING_CERT_FILE = "/home/pi/Documents/Amazon/423ce807c5-certificate.pem.crt"
# AWS IoT Thing Private Key File Path
THING_PRIVATE_KEY_FILE = "/home/pi/Documents/Amazon/423ce807c5-private.pem.key"
# =======================================================
# =======================================================
# No need to change following variables
MQTT_PORT = 8883
MQTT_KEEPALIVE_INTERVAL = 45
SHADOW_UPDATE_TOPIC = "$aws/things/" + THING_NAME + "/shadow/update"
SHADOW_UPDATE_ACCEPTED_TOPIC = "$aws/things/" + THING_NAME + "/shadow/update/accepted"
SHADOW_UPDATE_REJECTED_TOPIC = "$aws/things/" + THING_NAME + "/shadow/update/rejected"
SHADOW_STATE_DOC_LED_ON = """{"state" : {"desired" : {"LED" : "ON"}}}"""
SHADOW_STATE_DOC_LED_OFF = """{"state" : {"desired" : {"LED" : "OFF"}}}"""
RESPONSE_RECEIVED = False
# =======================================================
# Initiate MQTT Client
mqttc = mqtt.Client("Bing_1")
# Define on connect event function
# We shall subscribe to Shadow Accepted and Rejected Topics in this function
def on_connect(mosq, obj,flags, rc):
mqttc.subscribe(SHADOW_UPDATE_ACCEPTED_TOPIC, 1)
mqttc.subscribe(SHADOW_UPDATE_REJECTED_TOPIC, 1)
# Define on_message event function.
# This function will be invoked every time,
# a new message arrives for the subscribed topic
def on_message(mosq, obj, msg):
if str(msg.topic) == SHADOW_UPDATE_ACCEPTED_TOPIC:
print("\n---SUCCESS---\nShadow State Doc Accepted by AWS IoT.")
print("Response JSON:\n" + str(msg.payload))
elif str(msg.topic) == SHADOW_UPDATE_REJECTED_TOPIC:
print("\n---FAILED---\nShadow State Doc Rejected by AWS IoT.")
print("Error Response JSON:\n" + str(msg.payload))
else:
print("AWS Response Topic: " + str(msg.topic))
print("QoS: " + str(msg.qos))
print("Payload: " + str(msg.payload))
# Disconnect from MQTT_Broker
mqttc.disconnect()
global RESPONSE_RECEIVED
RESPONSE_RECEIVED = True
# Register callback functions
mqttc.on_message = on_message
mqttc.on_connect = on_connect
# Configure TLS Set
mqttc.tls_set(CA_ROOT_CERT_FILE, certfile=THING_CERT_FILE, keyfile=THING_PRIVATE_KEY_FILE, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
# Connect with MQTT Broker
mqttc.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)
mqttc.loop_start()
print("Enter 1 to Turn On the LED")
print("Enter 2 to Turn OFF the LED")
print("Enter 3 to exit")
data = raw_input("Select an option:")
if data == "1":
mqttc.publish(SHADOW_UPDATE_TOPIC,SHADOW_STATE_DOC_LED_ON,qos=1)
elif data == "2":
mqttc.publish(SHADOW_UPDATE_TOPIC,SHADOW_STATE_DOC_LED_OFF,qos=1)
elif data == "3":
sys.exit()
else:
print("Invalid input try again...")
sys.exit()
# Wait for Response
Counter = 1
while True:
time.sleep(1)
if Counter == 10:
print("No response from AWS IoT. Check your Settings.")
break
elif RESPONSE_RECEIVED == True:
break
Counter = Counter + 1
the-stack_106_31557
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <[email protected]>
# Mon 16 Apr 08:18:08 2012 CEST
bob_packages = ['bob.core']
from setuptools import setup, find_packages, dist
dist.Distribution(dict(setup_requires=['bob.extension', 'bob.blitz'] + bob_packages))
from bob.extension.utils import egrep, find_header, find_library
from bob.extension import pkgconfig
from bob.blitz.extension import Extension, Library, build_ext
from bob.extension.utils import load_requirements
build_requires = load_requirements()
# Define package version
version = open("version.txt").read().rstrip()
packages = ['boost']
boost_modules = ['system', 'filesystem']
def libhdf5_version(header):
vv = egrep(header, r"#\s*define\s+H5_VERSION\s+\"([\d\.]+)\"")
if not len(vv): return None
return vv[0].group(1)
class hdf5:
def __init__ (self):
"""
Searches for libhdf5 in stock locations. Allows user to override.
If the user sets the environment variable BOB_PREFIX_PATH, that prefixes
the standard path locations.
"""
import os
self.name = 'hdf5'
# try to locate pkg-config on our own first
try:
header = 'hdf5.h'
candidates = find_header(header)
if not candidates:
raise RuntimeError("could not find %s's `%s' - have you installed %s on this machine?" % (self.name, header, self.name))
self.include_directories = [os.path.dirname(candidates[0])]
directory = os.path.dirname(candidates[0])
version_header = os.path.join(directory, 'H5pubconf.h')
self.version = libhdf5_version(version_header)
# normalize
self.include_directories = [os.path.normpath(i) for i in self.include_directories]
# find library
prefix = os.path.dirname(os.path.dirname(self.include_directories[0]))
module = 'hdf5'
candidates = find_library(module, version=self.version, prefixes=[prefix], only_static=False)
if not candidates:
raise RuntimeError("cannot find required %s binary module `%s' - make sure libhdf5 is installed on `%s'" % (self.name, module, prefix))
# libraries
self.libraries = []
name, ext = os.path.splitext(os.path.basename(candidates[0]))
if ext in ['.so', '.a', '.dylib', '.dll']:
self.libraries.append(name[3:]) #strip 'lib' from the name
else: #link against the whole thing
self.libraries.append(':' + os.path.basename(candidates[0]))
# library path
self.library_directories = [os.path.dirname(candidates[0])]
except RuntimeError:
# now, we try to use pkg-config, which seems to be only available on Debian
pkg = pkgconfig('hdf5')
self.include_directories = pkg.include_directories()
version_header = os.path.join(self.include_directories[0], 'H5pubconf.h')
self.version = libhdf5_version(version_header)
self.libraries = pkg.libraries()
self.library_directories = pkg.library_directories()
def macros(self):
return [
('HAVE_%s' % self.name.upper(), '1'),
]
hdf5_pkg = hdf5()
system_include_dirs = hdf5_pkg.include_directories
library_dirs = hdf5_pkg.library_directories
libraries = hdf5_pkg.libraries
define_macros = hdf5_pkg.macros()
setup(
name='bob.io.base',
version=version,
description='Basic IO for Bob',
url='http://gitlab.idiap.ch/bob/bob.io.base',
license='BSD',
author='Andre Anjos',
author_email='[email protected]',
long_description=open('README.rst').read(),
packages=find_packages(),
include_package_data=True,
zip_safe=False,
setup_requires = build_requires,
install_requires = build_requires,
ext_modules = [
Extension("bob.io.base.version",
[
"bob/io/base/version.cpp",
],
define_macros = define_macros,
system_include_dirs = system_include_dirs,
version = version,
bob_packages = bob_packages,
packages = packages,
boost_modules = boost_modules,
),
Library("bob.io.base.bob_io_base",
[
"bob/io/base/cpp/CodecRegistry.cpp",
"bob/io/base/cpp/File.cpp",
"bob/io/base/cpp/HDF5ArrayFile.cpp",
"bob/io/base/cpp/HDF5Attribute.cpp",
"bob/io/base/cpp/HDF5Dataset.cpp",
"bob/io/base/cpp/HDF5File.cpp",
"bob/io/base/cpp/HDF5Group.cpp",
"bob/io/base/cpp/HDF5Types.cpp",
"bob/io/base/cpp/HDF5Utils.cpp",
"bob/io/base/cpp/reorder.cpp",
"bob/io/base/cpp/utils.cpp",
"bob/io/base/cpp/array.cpp",
"bob/io/base/cpp/array_type.cpp",
"bob/io/base/cpp/blitz_array.cpp",
],
libraries = libraries,
library_dirs = library_dirs,
system_include_dirs = system_include_dirs,
define_macros = define_macros,
version = version,
bob_packages = bob_packages,
packages = packages,
boost_modules = boost_modules,
),
Extension("bob.io.base._library",
[
"bob/io/base/bobskin.cpp",
"bob/io/base/codec.cpp",
"bob/io/base/file.cpp",
"bob/io/base/hdf5.cpp",
"bob/io/base/main.cpp",
],
library_dirs = library_dirs,
libraries = libraries,
define_macros = define_macros,
system_include_dirs = system_include_dirs,
version = version,
bob_packages = bob_packages,
packages = packages,
boost_modules = boost_modules,
),
Extension("bob.io.base._test",
[
"bob/io/base/test.cpp",
],
library_dirs = library_dirs,
libraries = libraries,
define_macros = define_macros,
system_include_dirs = system_include_dirs,
version = version,
bob_packages = bob_packages,
packages = packages,
boost_modules = boost_modules,
),
],
cmdclass = {
'build_ext': build_ext
},
classifiers = [
'Framework :: Bob',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
the-stack_106_31558 | """tests rio_tiler.landsat8"""
import os
import pytest
from mock import patch
from rasterio.crs import CRS
from rio_toa import toa_utils
from rio_tiler import landsat8
from rio_tiler.errors import (
TileOutsideBounds,
InvalidBandName,
NoOverviewWarning,
InvalidLandsatSceneId,
)
LANDSAT_SCENE_C1 = "LC08_L1TP_016037_20170813_20170814_01_RT"
LANDSAT_BUCKET = os.path.join(os.path.dirname(__file__), "fixtures", "landsat-pds")
LANDSAT_PATH = os.path.join(
LANDSAT_BUCKET, "c1", "L8", "016", "037", LANDSAT_SCENE_C1, LANDSAT_SCENE_C1
)
with open("{}_MTL.txt".format(LANDSAT_PATH), "r") as f:
LANDSAT_METADATA = toa_utils._parse_mtl_txt(f.read())
with open("{}_MTL.txt".format(LANDSAT_PATH), "r") as f:
LANDSAT_METADATA_RAW = f.read().encode("utf-8")
@pytest.fixture(autouse=True)
def testing_env_var(monkeypatch):
"""Set fake env to make sure we don't hit AWS services."""
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "jqt")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "rde")
monkeypatch.delenv("AWS_PROFILE", raising=False)
monkeypatch.setenv("AWS_CONFIG_FILE", "/tmp/noconfigheere")
monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", "/tmp/noconfighereeither")
monkeypatch.setenv("GDAL_DISABLE_READDIR_ON_OPEN", "TRUE")
@patch("rio_tiler.landsat8._landsat_get_mtl")
def test_bounds_valid(landsat_get_mtl):
"""
Should work as expected (get and parse metadata)
"""
landsat_get_mtl.return_value = LANDSAT_METADATA
meta = landsat8.bounds(LANDSAT_SCENE_C1)
assert meta.get("sceneid") == LANDSAT_SCENE_C1
assert len(meta.get("bounds")) == 4
@patch("rio_tiler.landsat8._landsat_get_mtl")
def test_metadata_valid_default(landsat_get_mtl, monkeypatch):
"""Get bounds and get stats for all bands."""
monkeypatch.setattr(landsat8, "LANDSAT_BUCKET", LANDSAT_BUCKET)
landsat_get_mtl.return_value = LANDSAT_METADATA
meta = landsat8.metadata(LANDSAT_SCENE_C1)
assert meta["sceneid"] == LANDSAT_SCENE_C1
assert len(meta["bounds"]["value"]) == 4
assert len(meta["statistics"].items()) == 12
assert len(meta["statistics"]["1"]["histogram"][0]) == 10
assert list(map(int, meta["statistics"]["1"]["pc"])) == [1210, 7046]
meta = landsat8.metadata(LANDSAT_SCENE_C1, histogram_bins=20)
assert meta["sceneid"] == LANDSAT_SCENE_C1
assert len(meta["statistics"]["1"]["histogram"][0]) == 20
meta = landsat8.metadata(
LANDSAT_SCENE_C1, histogram_bins=None, histogram_range=[1000, 4000]
)
assert meta["sceneid"] == LANDSAT_SCENE_C1
assert len(meta["statistics"]["1"]["histogram"][0]) == 10
@patch("rio_tiler.landsat8._landsat_get_mtl")
def test_metadata_valid_custom(landsat_get_mtl, monkeypatch):
"""Get bounds and get stats for all bands with custom percentiles."""
monkeypatch.setattr(landsat8, "LANDSAT_BUCKET", LANDSAT_BUCKET)
landsat_get_mtl.return_value = LANDSAT_METADATA
meta = landsat8.metadata(LANDSAT_SCENE_C1, pmin=10, pmax=90)
assert meta["sceneid"] == LANDSAT_SCENE_C1
assert len(meta["bounds"]["value"]) == 4
assert len(meta["statistics"].items()) == 12
assert list(map(int, meta["statistics"]["1"]["pc"])) == [1275, 3918]
@patch("rio_tiler.landsat8._landsat_get_mtl")
def test_tile_valid_default(landsat_get_mtl, monkeypatch):
"""
Should work as expected
"""
monkeypatch.setattr(landsat8, "LANDSAT_BUCKET", LANDSAT_BUCKET)
landsat_get_mtl.return_value = LANDSAT_METADATA
tile_z = 8
tile_x = 71
tile_y = 102
data, mask = landsat8.tile(LANDSAT_SCENE_C1, tile_x, tile_y, tile_z)
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
@patch("rio_tiler.landsat8._landsat_get_mtl")
def test_tile_valid_nrg(landsat_get_mtl, monkeypatch):
"""Should return a custom band combination tile."""
monkeypatch.setattr(landsat8, "LANDSAT_BUCKET", LANDSAT_BUCKET)
landsat_get_mtl.return_value = LANDSAT_METADATA
tile_z = 8
tile_x = 71
tile_y = 102
bands = ("5", "4", "3")
data, mask = landsat8.tile(LANDSAT_SCENE_C1, tile_x, tile_y, tile_z, bands=bands)
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
@patch("rio_tiler.landsat8._landsat_get_mtl")
def test_tile_valid_tir(landsat_get_mtl, monkeypatch):
"""Should return a tile and mask from TIR band."""
monkeypatch.setattr(landsat8, "LANDSAT_BUCKET", LANDSAT_BUCKET)
landsat_get_mtl.return_value = LANDSAT_METADATA
tile_z = 8
tile_x = 71
tile_y = 102
bands = "10"
data, mask = landsat8.tile(LANDSAT_SCENE_C1, tile_x, tile_y, tile_z, bands=bands)
assert data.shape == (1, 256, 256)
assert mask.shape == (256, 256)
@patch("rio_tiler.landsat8._landsat_get_mtl")
def test_tile_valid_qa(landsat_get_mtl, monkeypatch):
"""Should return a tile and mask from TIR band."""
monkeypatch.setattr(landsat8, "LANDSAT_BUCKET", LANDSAT_BUCKET)
landsat_get_mtl.return_value = LANDSAT_METADATA
tile_z = 8
tile_x = 71
tile_y = 102
bands = "QA"
data, mask = landsat8.tile(LANDSAT_SCENE_C1, tile_x, tile_y, tile_z, bands=bands)
assert data.shape == (1, 256, 256)
assert mask.shape == (256, 256)
@patch("rio_tiler.landsat8._landsat_get_mtl")
def test_tile_invalidband(landsat_get_mtl, monkeypatch):
"""Should raise an error on invalid band name."""
monkeypatch.setattr(landsat8, "LANDSAT_BUCKET", LANDSAT_BUCKET)
tile_z = 8
tile_x = 71
tile_y = 102
bands = "25"
with pytest.raises(InvalidBandName):
data, mask = landsat8.tile(
LANDSAT_SCENE_C1, tile_x, tile_y, tile_z, bands=bands
)
landsat_get_mtl.assert_not_called()
@patch("rio_tiler.landsat8._landsat_get_mtl")
def test_tile_valid_pan(landsat_get_mtl, monkeypatch):
"""
Should work as expected
"""
monkeypatch.setattr(landsat8, "LANDSAT_BUCKET", LANDSAT_BUCKET)
landsat_get_mtl.return_value = LANDSAT_METADATA
tile_z = 8
tile_x = 71
tile_y = 102
data, mask = landsat8.tile(LANDSAT_SCENE_C1, tile_x, tile_y, tile_z, pan=True)
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
@patch("rio_tiler.landsat8._landsat_get_mtl")
def test_tile_invalid_bounds(landsat_get_mtl, monkeypatch):
"""
Should raise an error with invalid tile
"""
monkeypatch.setattr(landsat8, "LANDSAT_BUCKET", LANDSAT_BUCKET)
landsat_get_mtl.return_value = LANDSAT_METADATA
tile_z = 8
tile_x = 701
tile_y = 102
with pytest.raises(TileOutsideBounds):
landsat8.tile(LANDSAT_SCENE_C1, tile_x, tile_y, tile_z)
def test_landsat_id_pre_invalid():
"""Raises error on invalid pre-collection."""
scene = "L0300342017083LGN00"
with pytest.raises(InvalidLandsatSceneId):
landsat8._landsat_parse_scene_id(scene)
def test_landsat_id_c1_invalid():
"""Raises error on invalid collection1 sceneid."""
scene = "LC08_005004_20170410_20170414_01_T1"
with pytest.raises(InvalidLandsatSceneId):
landsat8._landsat_parse_scene_id(scene)
def test_landsat_id_pre_valid():
"""Parse landsat valid pre-collection sceneid and return metadata."""
scene = "LC80300342017083LGN00"
expected_content = {
"acquisitionJulianDay": "083",
"acquisitionYear": "2017",
"archiveVersion": "00",
"date": "2017-03-24",
"groundStationIdentifier": "LGN",
"key": "L8/030/034/LC80300342017083LGN00/LC80300342017083LGN00",
"path": "030",
"row": "034",
"satellite": "8",
"scene": "LC80300342017083LGN00",
"sensor": "C",
}
assert landsat8._landsat_parse_scene_id(scene) == expected_content
def test_landsat_id_c1_valid():
"""Parse landsat valid collection1 sceneid and return metadata."""
scene = "LC08_L1TP_005004_20170410_20170414_01_T1"
expected_content = {
"acquisitionDay": "10",
"acquisitionMonth": "04",
"acquisitionYear": "2017",
"collectionCategory": "T1",
"collectionNumber": "01",
"date": "2017-04-10",
"key": "c1/L8/005/004/LC08_L1TP_005004_20170410_\
20170414_01_T1/LC08_L1TP_005004_20170410_20170414_01_T1",
"path": "005",
"processingCorrectionLevel": "L1TP",
"processingDay": "14",
"processingMonth": "04",
"processingYear": "2017",
"row": "004",
"satellite": "08",
"scene": "LC08_L1TP_005004_20170410_20170414_01_T1",
"sensor": "C",
}
assert landsat8._landsat_parse_scene_id(scene) == expected_content
@patch("rio_tiler.landsat8.urlopen")
def test_landsat_get_mtl_valid(urlopen):
"""Return MTL metadata."""
urlopen.return_value.read.return_value = LANDSAT_METADATA_RAW
meta_data = landsat8._landsat_get_mtl(LANDSAT_SCENE_C1)
assert (
meta_data["L1_METADATA_FILE"]["METADATA_FILE_INFO"]["LANDSAT_SCENE_ID"]
== "LC80160372017225LGN00"
)
@patch("rio_tiler.landsat8.urlopen")
def test_landsat_get_mtl_invalid(urlopen):
"""Raises error when MTL file not found or empty."""
urlopen.return_value.read.return_value = {}
with pytest.raises(Exception):
landsat8._landsat_get_mtl(LANDSAT_SCENE_C1)
def test_landsat_get_stats_valid():
"""Should return a valid dict with array statistics."""
stats = landsat8._landsat_stats(
"4", LANDSAT_PATH, LANDSAT_METADATA["L1_METADATA_FILE"]
)
assert stats["bounds"]
assert stats["bounds"]["crs"] == CRS({"init": "EPSG:4326"})
assert stats["statistics"]["4"]
assert isinstance(stats["statistics"]["4"]["pc"][0], float)
assert list(map(int, stats["statistics"]["4"]["pc"])) == [423, 7028]
def test_landsat_get_stats_validOptions():
"""Should return a valid dict with array statistics."""
stats = landsat8._landsat_stats(
"10",
LANDSAT_PATH,
LANDSAT_METADATA["L1_METADATA_FILE"],
overview_level=2,
percentiles=(5, 95),
dst_crs="epsg:3857",
)
assert stats["bounds"]
assert stats["bounds"]["crs"] == "epsg:3857"
assert stats["statistics"]["10"]
assert list(map(int, stats["statistics"]["10"]["pc"])) == [281, 297]
def test_landsat_get_stats_noOverviews(monkeypatch):
"""Should return a valid dict with array statistics and warns about missing overviews."""
monkeypatch.setenv("GDAL_DISABLE_READDIR_ON_OPEN", "EMPTY_DIR")
with pytest.warns(NoOverviewWarning):
stats = landsat8._landsat_stats(
"5", LANDSAT_PATH, LANDSAT_METADATA["L1_METADATA_FILE"]
)
assert stats["statistics"]["5"]
|
the-stack_106_31561 | # This file is part of the kambpf project (https://github.com/zdule/part_ii_project).
# This file is offered under two licenses: GPLv2 and Apache License Version 2.
# For more information see the LICENSE file at the root of the project.
#
# Copyright 2020 Dusan Zivanovic
import pandas as pd
import sys
import matplotlib.pyplot as plt
from pathlib import Path
import seaborn as sb
import numpy as np
from process_logs import process_logs
def plot_bandwidth(results, output_path):
results['rbw'] /= 1024
results['wbw'] /= 1024
ylabels = ["Read throughput [MiB/s]", "Write throughput [MiB/s]"]
fig, axes = plt.subplots(1,2, sharey=True, figsize=(8,4))
for i, rw in enumerate(['rbw','wbw']):
sb.boxplot(x="mechanism", y=rw, data=results, showfliers = False, ax=axes[i])
sb.swarmplot(x="mechanism", y=rw, data=results, color=".25", ax=axes[i])
axes[i].set_ylabel(ylabels[i])
axes[i].set_xlabel("")
plt.savefig(str(output_path / "bandwidth.png"))
plt.show()
def plot(res, output_path):
fig, axes = plt.subplots(3,2, sharex='col', figsize=(8,4))
res['latency'] /= 1000
ranges = [(200,600), (100,400)]
xlabels = ["Read latency [μs]", "Write latency [μs]"]
for j,rw in enumerate(['read','write']):
reads = res[res['rw']==rw]
pacici = []
labels = []
data = reads.groupby('mechanism')['latency'].apply(list)
prop_cycle = plt.rcParams['axes.prop_cycle']()
for i,(k,v) in enumerate(data.items()):
_,_,pache = axes[i][j].hist(v, 35, label=k, range = ranges[j], **next(prop_cycle))
pacici.append(pache[0])
labels.append(k)
axes[len(data)-1][j].set_xlabel(xlabels[j])
plt.figlegend(pacici,labels, loc = 'upper center', ncol=3 )
fig.text(0.04, 0.5, 'Frequency', va='center', rotation='vertical')
plt.savefig(str(output_path / "distribution.png"))
plt.show()
def log_plot(res, output_path):
fig, axes = plt.subplots(1,2, sharey=True, figsize=(8,4))
res['latency'] /= 1000
ylabels = {'read' : 'Read', 'write' : 'Write'}
for j, rw in enumerate(['read','write']):
reads = res[res['rw']==rw]
pacici = []
labels = []
data = reads.groupby('mechanism')['latency'].apply(list)
for i,(k,v) in enumerate(data.items()):
v = sorted(v)[:-10]
x = np.linspace(0,100, num=len(v))
axes[j].plot(x, v, label=k)
axes[j].set_xlabel("Percentile")
axes[j].set_ylabel(f'{ylabels[rw]} latency [μs]')
plt.legend()
plt.tight_layout()
plt.savefig(str(output_path / "percentiles.png"))
plt.show()
def process_lat_logs(folder):
res = None
for mechanism in ['kambpfprobes', 'kprobes', 'noprobes']:
path = folder / f"latency_mechanism-{mechanism}.json_lat.log"
df = pd.read_csv(path, header=None)
df = df[[1,2]].rename(columns={1: "latency", 2: "rw"})
df.loc[df['rw']==0,'rw'] = 'read'
df.loc[df['rw']==1,'rw'] = 'write'
mechanism_map = {'kambpfprobes':'kambpfprobes', 'kprobes' : 'kprobes', 'noprobes': 'untraced'}
df['mechanism'] = mechanism_map[mechanism]
print(df)
if res is None:
res = df
else:
res = res.append(df)
return res
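# Format assumption for the *_lat.log files read above (matching the column
# indices used in process_lat_logs, not taken from the original repository):
# headerless CSV rows where column 1 is a latency sample in nanoseconds and
# column 2 is the direction flag (0 = read, 1 = write), e.g.
#   12, 48672, 0, 4096, 0
#   14, 55104, 1, 4096, 0
# Only columns 1 and 2 are used; the remaining fields are ignored.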
if __name__== "__main__":
path = Path(sys.argv[1])
lat_logs = process_lat_logs(path)
logs = process_logs(path, "bandwidth")
logs.loc[logs['mechanism'] == 'noprobes','mechanism'] = 'untraced'
plot_bandwidth(logs, path)
plot(lat_logs, path)
log_plot(lat_logs, path)
|
the-stack_106_31562 | import os
import glob
import torch
import random
import numpy as np
from torch.utils.data import Dataset, DataLoader
from utils.utils import read_wav_np, read_flac_np
def create_dataloader(hp, args, train):
dataset = MelFromDisk(hp, args, train)
if train:
return DataLoader(dataset=dataset, batch_size=hp.train.batch_size, shuffle=True,
num_workers=hp.train.num_workers, pin_memory=True, drop_last=True)
else:
return DataLoader(dataset=dataset, batch_size=1, shuffle=False,
num_workers=hp.train.num_workers, pin_memory=True, drop_last=False)
class MelFromDisk(Dataset):
def __init__(self, hp, args, train):
self.hp = hp
self.args = args
self.train = train
self.path = hp.data.train if train else hp.data.validation
self.wav_list = glob.glob(os.path.join(self.path, '**', '*.flac'), recursive=True)
self.mel_segment_length = hp.audio.segment_length // hp.audio.hop_length + 2
self.mapping = [i for i in range(len(self.wav_list))]
def __len__(self):
return len(self.wav_list)
def __getitem__(self, idx):
if self.train:
idx1 = idx
idx2 = self.mapping[idx1]
return self.my_getitem(idx1), self.my_getitem(idx2)
else:
return self.my_getitem(idx)
def shuffle_mapping(self):
random.shuffle(self.mapping)
def my_getitem(self, idx):
wavpath = self.wav_list[idx]
melpath = wavpath.replace('.flac', '.mel')
sr, audio = read_flac_np(wavpath)
if len(audio) < self.hp.audio.segment_length + self.hp.audio.pad_short:
audio = np.pad(audio, (0, self.hp.audio.segment_length + self.hp.audio.pad_short - len(audio)), \
mode='constant', constant_values=0.0)
audio = torch.from_numpy(audio).unsqueeze(0)
mel = torch.load(melpath).squeeze(0)
if self.train:
max_mel_start = mel.size(1) - self.mel_segment_length
mel_start = random.randint(0, max_mel_start)
mel_end = mel_start + self.mel_segment_length
mel = mel[:, mel_start:mel_end]
audio_start = mel_start * self.hp.audio.hop_length
audio = audio[:, audio_start:audio_start+self.hp.audio.segment_length]
audio = audio + (1/32768) * torch.randn_like(audio)
return mel, audio
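# A minimal worked example of the mel/audio alignment used in my_getitem above.
# It is not part of the original training code; the hyperparameter values
# (segment_length=16000, hop_length=256) are assumptions chosen for illustration.
if __name__ == "__main__":
    segment_length, hop_length = 16000, 256
    mel_segment_length = segment_length // hop_length + 2  # 62 + 2 = 64 mel frames
    mel_start = 10                                         # an arbitrary crop position
    audio_start = mel_start * hop_length                   # 2560: audio offset matching mel frame 10
    print(mel_segment_length, audio_start)                 # 64 2560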
|
the-stack_106_31563 | import datetime
from django import forms
from django.test import TestCase
from django.utils.translation import activate
from institution.models import Institution
from users.forms import CustomUserChangeForm
from users.forms import CustomUserCreationForm
from users.forms import ProfileUpdateForm
from users.forms import RegisterForm
from users.models import CustomUser
from users.models import Profile
class ProfileUpdateFormTests(TestCase):
fixtures = [
'institution/fixtures/tests/institutions.json',
'users/fixtures/tests/users.json',
]
def setUp(self):
self.institution = Institution.objects.get(name='Example University')
self.shibboleth_user = CustomUser.objects.get(email='[email protected]')
self.guest_user = CustomUser.objects.get(email='[email protected]')
def test_profile_update(self):
"""
Ensure the profile update form works for institutional and external users.
"""
test_cases = [
self.shibboleth_user,
self.guest_user,
]
for test_case in test_cases:
scw_username = 'x.test.username'
uid_number = 5000001
description = 'test user'
account_status = 1
form = ProfileUpdateForm(
data={
'user': test_case.pk,
'scw_username': scw_username,
'uid_number': uid_number,
'description': description,
'account_status': account_status,
},
instance=test_case.profile,
)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(test_case.profile.scw_username, scw_username)
self.assertEqual(test_case.profile.uid_number, uid_number)
self.assertEqual(test_case.profile.description, description)
self.assertEqual(test_case.profile.account_status, account_status)
def test_pre_approved_options(self):
"""
Ensure the correct account status options are available for accounts that are awaiting
approval.
"""
self.shibboleth_user.profile.account_status = Profile.AWAITING_APPROVAL
self.shibboleth_user.profile.save()
self.assertEqual(self.shibboleth_user.profile.account_status, Profile.AWAITING_APPROVAL)
form = ProfileUpdateForm(
data={
'user': self.shibboleth_user.pk,
'account_status': self.shibboleth_user.profile.account_status,
},
instance=self.shibboleth_user.profile,
)
self.assertTrue(form.is_valid())
expected_choices = Profile.PRE_APPROVED_OPTIONS
actual_choices = form.fields['account_status'].widget.choices
self.assertEqual(actual_choices, expected_choices)
def test_post_approved_options(self):
"""
Ensure the correct account status options are available for accounts that have been
approved.
"""
self.shibboleth_user.profile.account_status = Profile.APPROVED
self.shibboleth_user.profile.save()
self.assertEqual(self.shibboleth_user.profile.account_status, Profile.APPROVED)
form = ProfileUpdateForm(
data={
'user': self.shibboleth_user.pk,
'account_status': Profile.APPROVED,
},
instance=self.shibboleth_user.profile,
)
self.assertTrue(form.is_valid())
expected_choices = Profile.POST_APPROVED_OPTIONS
actual_choices = form.fields['account_status'].widget.choices
self.assertEqual(actual_choices, expected_choices)
class CustomUserCreationFormTests(TestCase):
fixtures = [
'institution/fixtures/tests/institutions.json',
]
def setUp(self):
self.institution = Institution.objects.get(name='Example University')
def test_create_user(self):
"""
Ensure the user creation form works for institutional and external users.
"""
test_cases = {
'@'.join(['shibboleth.user', self.institution.base_domain]): True,
'[email protected]': False,
}
for email, shibboleth_required in test_cases.items():
form = CustomUserCreationForm(
data={
'email': email,
'first_name': 'Joe',
'last_name': 'Bloggs',
'is_shibboleth_login_required': shibboleth_required,
})
self.assertTrue(form.is_valid())
def test_invalid_institutional_email(self):
"""
Ensure an email address from an unsupported institution domain is caught via the
        CustomUserCreationForm, if the user is required to log in via a shibboleth IDP.
"""
form = CustomUserCreationForm(
data={
'email': 'joe.bloggs@invalid_base_domain.ac.uk',
'first_name': 'Joe',
'last_name': 'Bloggs',
'is_shibboleth_login_required': True,
})
self.assertFalse(form.is_valid())
def test_without_required_fields(self):
"""
Ensure a CustomUser instance can not be created without the required form fields.
"""
activate('en')
form = CustomUserCreationForm(data={})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['email'], ['This field is required.'])
self.assertEqual(form.errors['first_name'], ['This field is required.'])
self.assertEqual(form.errors['last_name'], ['This field is required.'])
def test_password_generation(self):
"""
        Ensure a random password is generated for new user accounts.
"""
test_cases = {
'@'.join(['shibboleth.user', self.institution.base_domain]): True,
'[email protected]': False,
}
for email, shibboleth_required in test_cases.items():
form = CustomUserCreationForm(
data={
'email': email,
'first_name': 'Joe',
'last_name': 'Bloggs',
'is_shibboleth_login_required': shibboleth_required,
})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(CustomUser.objects.filter(email=email).count(), 1)
self.assertIsNotNone(CustomUser.objects.get(email=email).password)
class RegisterFormTests(TestCase):
fixtures = [
'institution/fixtures/tests/institutions.json',
]
def test_user_registration(self):
"""
Ensure the registration form works for shibboleth users.
"""
form = RegisterForm(
data={
'first_name': 'Joe',
'last_name': 'Bloggs',
'reason_for_account': 'HPC',
'accepted_terms_and_conditions': True,
})
self.assertTrue(form.is_valid())
def test_without_required_fields(self):
"""
Ensure the registration form fails if the required fields are missing.
"""
form = RegisterForm(data={})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['first_name'], ['This field is required.'])
self.assertEqual(form.errors['last_name'], ['This field is required.'])
self.assertEqual(form.errors['reason_for_account'], ['This field is required.'])
self.assertEqual(form.errors['accepted_terms_and_conditions'], ['This field is required.'])
class CustomUserChangeFormTests(TestCase):
fixtures = [
'institution/fixtures/tests/institutions.json',
'users/fixtures/tests/users.json',
]
def setUp(self):
self.institution = Institution.objects.get(name='Example University')
self.shibboleth_user = CustomUser.objects.get(email='[email protected]')
def test_user_update(self):
"""
Ensure the user update form works.
"""
first_name = 'John'
last_name = 'Smith'
email = '[email protected]'
form = CustomUserChangeForm(
data={
'username': self.shibboleth_user.username,
'first_name': first_name,
'last_name': last_name,
'email': email,
'is_shibboleth_login_required': True,
'date_joined': datetime.date.today(),
},
instance=self.shibboleth_user,
)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(self.shibboleth_user.first_name, first_name)
self.assertEqual(self.shibboleth_user.last_name, last_name)
self.assertEqual(self.shibboleth_user.email, email)
def test_invalid_institutional_email(self):
"""
Ensure an email address from an unsupported institution domain is caught.
"""
with self.assertRaises(Institution.DoesNotExist):
form = CustomUserChangeForm(
data={
'username': self.shibboleth_user.username,
'first_name': self.shibboleth_user.first_name,
'last_name': self.shibboleth_user.last_name,
'email': '[email protected]',
'is_shibboleth_login_required': True,
'date_joined': datetime.date.today(),
},
instance=self.shibboleth_user,
)
self.assertTrue(form.is_valid())
form.save()
|
the-stack_106_31564 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
from collections import defaultdict
from collections import Iterable
import contextlib
from .wrapped_decorator import signature_safe_contextmanager, wrap_decorator
import os
import re
import traceback
import six
import copy
import numpy as np
import subprocess
import multiprocessing
import sys
import logging
from .. import compat as cpt
from .proto import framework_pb2
from . import core
from . import unique_name
import paddle.version as fluid_version
import warnings
import functools
__all__ = [
'Program',
'default_startup_program',
'default_main_program',
'program_guard',
'name_scope',
'cuda_places',
'cpu_places',
'xpu_places',
'cuda_pinned_places',
'in_dygraph_mode',
'is_compiled_with_cuda',
'is_compiled_with_xpu',
'Variable',
'load_op_library',
'require_version',
'device_guard',
'set_flags',
'get_flags',
]
EMPTY_VAR_NAME = core.kEmptyVarName()
TEMP_VAR_NAME = core.kTempVarName()
GRAD_VAR_SUFFIX = core.kGradVarSuffix()
ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
_dygraph_tracer_ = None
_global_expected_place_ = None
_current_device = None
global_prog_seed = 0
def require_version(min_version, max_version=None):
"""
    Check if the installed version of PaddlePaddle is in [min_version, max_version].
    If the installed version is lower than ``min_version`` or higher than ``max_version``,
    an exception will be thrown; nothing is returned if the installed version satisfies the requirement.
Args:
min_version (str): the minimum version required (like '1.4.0').
max_version (str, optional): the max version required (like '1.6.0'), default is None,
meaning any version equal or higher than ``min_version`` is acceptable.
Returns:
None.
Raises:
TypeError: if the type of ``min_version`` is not str.
TypeError: if the type of ``max_version`` is not str or type(None).
ValueError: if the value of ``min_version`` is not in version format.
ValueError: if the value of ``max_version`` is not in version format or None.
Exception: if the installed version is lower than ``min_version`` or higher than ``max_version``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# any version >= 0.1.0 is acceptable.
fluid.require_version('0.1.0')
# if 0.1.0 <= version <= 10.0.0, it is acceptable.
fluid.require_version(min_version='0.1.0', max_version='10.0.0')
"""
if not isinstance(min_version, str):
raise TypeError(
"The type of 'min_version' in require_version must be str, but received %s."
% (type(min_version)))
if not isinstance(max_version, (str, type(None))):
raise TypeError(
"The type of 'max_version' in require_version must be str or type(None), but received %s."
% (type(max_version)))
check_format = re.match(r'\d+(\.\d+){0,3}', min_version)
if check_format is None or check_format.group() != min_version:
raise ValueError(
"The value of 'min_version' in require_version must be in format '\\d+(\\.\\d+){0,3}', "
"like '1.5.2.0', but received %s" % min_version)
if max_version is not None:
check_format = re.match(r'\d+(\.\d+){0,3}', max_version)
if check_format is None or check_format.group() != max_version:
raise ValueError(
"The value of 'max_version' in require_version must be in format '\\d+(\\.\\d+){0,3}', "
"like '1.5.2.0', but received %s" % max_version)
version_installed = [
fluid_version.major, fluid_version.minor, fluid_version.patch,
fluid_version.rc
]
zero_version = ['0', '0', '0', '0']
def version_cmp(ver_a, ver_b):
for i in six.moves.range(len(ver_a)):
if int(ver_a[i]) > int(ver_b[i]):
return 1
elif int(ver_a[i]) < int(ver_b[i]):
return -1
return 0
if version_cmp(version_installed, zero_version) == 0:
if max_version is not None:
warnings.warn(
"PaddlePaddle version in [%s, %s] required, but %s installed. "
"Maybe you are using a develop version, "
"please make sure the version is good with your code." %
(min_version, max_version, fluid_version.full_version))
else:
warnings.warn(
"PaddlePaddle version %s or higher is required, but %s installed, "
"Maybe you are using a develop version, "
"please make sure the version is good with your code." %
(min_version, fluid_version.full_version))
return
min_version_split = min_version.split('.')
min_version_to_check = min_version_split + zero_version[len(
min_version_split):]
if max_version is not None:
max_version_split = max_version.split('.')
max_version_to_check = max_version_split + zero_version[len(
max_version_split):]
if version_cmp(version_installed,
max_version_to_check) > 0 or version_cmp(
version_installed, min_version_to_check) < 0:
raise Exception(
"VersionError: PaddlePaddle version in [%s, %s] required, but %s installed."
% (min_version, max_version, fluid_version.full_version))
else:
if version_cmp(version_installed, min_version_to_check) < 0:
raise Exception(
"VersionError: PaddlePaddle version %s or higher is required, but %s installed, "
"please upgrade your PaddlePaddle to %s or other higher version."
% (min_version, fluid_version.full_version, min_version))
def in_dygraph_mode():
"""
.. note::
        Dynamic graph mode is turned ON by default since paddle 2.0.0
This API checks whether paddle runs in dynamic graph mode.
You can turn ON static graph mode by `enable_static <../dygraph/base/disable_dygraph_en.html>`_ ,
and turn OFF static graph mode by `disable_static <../dygraph/base/enable_dygraph_en.html>`_ .
Returns:
bool: Whether paddle runs in dynamic graph mode.
Examples:
.. code-block:: python
import paddle
            print(paddle.in_dynamic_mode())  # True, dynamic mode is turned ON by default since paddle 2.0.0
paddle.enable_static()
print(paddle.in_dynamic_mode()) # False, Now we are in static mode
paddle.disable_static()
print(paddle.in_dynamic_mode()) # True, Now we are in dynamic mode
"""
return _dygraph_tracer_ is not None
def _dygraph_not_support_(func):
def __impl__(*args, **kwargs):
assert not in_dygraph_mode(
), "We don't support %s in imperative mode" % func.__name__
return func(*args, **kwargs)
return __impl__
def _dygraph_only_(func):
def __impl__(*args, **kwargs):
assert in_dygraph_mode(
), "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." % func.__name__
return func(*args, **kwargs)
return __impl__
def _static_only_(func):
def __impl__(*args, **kwargs):
assert not in_dygraph_mode(
), "In PaddlePaddle 2.x, we turn on dynamic graph mode by default, and '%s()' is only supported in static graph mode. So if you want to use this api, please call 'paddle.enable_static()' before this api to enter static graph mode." % func.__name__
return func(*args, **kwargs)
return __impl__
# NOTE(zhiqiu): This decorator is used for the APIs of Variable which are only
# used to make Variable and VarBase have the same interfaces, like numpy. Since VarBase is not exposed in our
# official documents, logically, we want to keep VarBase and Variable consistent. While, actually,
# in our implementation, there are some APIs not supported, like numpy, because Variable contains the desc.
# So, those APIs are listed under class Variable to generate docs only.
# TODO(zhiqiu): We should make VarBase consistent with Variable in the future, for example, by inheriting
# the same base class.
def _fake_interface_only_(func):
def __impl__(*args, **kwargs):
raise AssertionError(
"'%s' should be called by imperative Varible in imperative mode, please run it in dygraph "
"mode. You can turn off paddle.enable_static() if you are in static mode, or turn off "
"ProgramTranslator if you are using @paddle.jit.to_static. If you have to run ProgramTranslator, "
"please use other API to replace '%s'" % (func.__name__,
func.__name__))
return __impl__
# NOTE(chenweihang): There is an argument name typo (stat_dict; the correct name is state_dict)
# in the fluid APIs Layer.set_dict and Optimizer.load. In order to correct the argument without
# introducing compatibility issues, add this decorator.
# NOTE(chenweihang): not using `wrap_decorator` here is because `wrap_decorator` will
# move kwargs to args, which doesn't work in this decorate case
def deprecate_stat_dict(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if 'stat_dict' in kwargs:
warnings.warn(
"The argument `stat_dict` has deprecated, please change it to `state_dict`.",
DeprecationWarning)
kwargs['state_dict'] = kwargs['stat_dict']
kwargs.pop('stat_dict')
return func(*args, **kwargs)
return wrapper
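# Usage sketch for deprecate_stat_dict (illustrative, not part of the upstream
# file): a decorated method keeps accepting the legacy `stat_dict` keyword and
# forwards it as `state_dict` while emitting a DeprecationWarning, e.g.
#
#     @deprecate_stat_dict
#     def set_dict(self, state_dict): ...
#
#     layer.set_dict(stat_dict=d)  # warns, then runs as set_dict(state_dict=d)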
dygraph_not_support = wrap_decorator(_dygraph_not_support_)
dygraph_only = wrap_decorator(_dygraph_only_)
static_only = wrap_decorator(_static_only_)
fake_interface_only = wrap_decorator(_fake_interface_only_)
def _dygraph_tracer():
return _dygraph_tracer_
def _current_expected_place():
global _global_expected_place_
if _global_expected_place_ is None:
if core.is_compiled_with_cuda():
try:
device_count = core.get_cuda_device_count()
except Exception as e:
device_count = 0
if device_count > 0:
_global_expected_place_ = core.CUDAPlace(0)
else:
warnings.warn(
"You are using GPU version Paddle, but your CUDA device is not set properly. CPU device will be used by default."
)
_global_expected_place_ = core.CPUPlace()
else:
_global_expected_place_ = core.CPUPlace()
return _global_expected_place_
def _set_dygraph_tracer_expected_place(place):
global _dygraph_tracer_
if _dygraph_tracer_ is not None:
_dygraph_tracer_._expected_place = place
def _set_expected_place(place):
global _global_expected_place_
_global_expected_place_ = place
_set_dygraph_tracer_expected_place(place)
# TODO(zhiqiu): remove this function.
def _var_base_to_np(var_base):
"""
convert VarBase tp numpy
Args:
var_base(VarBase) : the VarBase to convert
Returns (np.ndarray): the np.ndarray contain the value of VarBase
"""
warnings.warn(
"paddle.fluid.framework._var_base_to_np is deprecated, please use var_base.numpy() instead of _var_base_to_np(var_base)."
)
return var_base.numpy()
def _cpu_num():
if "CPU_NUM" not in os.environ.keys():
if multiprocessing.cpu_count() > 1:
sys.stderr.write(
'!!! The CPU_NUM is not specified, you should set CPU_NUM in the environment variable list.\n'
                'CPU_NUM indicates how many CPUPlaces are used in the current task.\n'
                'If this parameter is set to N (equal to the number of physical CPU cores), the program may be faster.\n\n'
                'export CPU_NUM={} # for example, set CPU_NUM to the number of physical CPU cores, which is {}.\n\n'
'!!! The default number of CPU_NUM=1.\n'.format(
multiprocessing.cpu_count(), multiprocessing.cpu_count()))
os.environ['CPU_NUM'] = str(1)
cpu_num = os.environ.get('CPU_NUM')
return int(cpu_num)
def _cuda_ids():
gpus_env = os.getenv("FLAGS_selected_gpus")
if gpus_env:
device_ids = [int(s) for s in gpus_env.split(",")]
else:
device_ids = six.moves.range(core.get_cuda_device_count())
return device_ids
def _xpu_ids():
xpus_env = os.getenv("FLAGS_selected_xpus")
if xpus_env:
device_ids = [int(s) for s in xpus_env.split(",")]
else:
device_ids = six.moves.range(core.get_xpu_device_count())
return device_ids
def is_compiled_with_xpu():
"""
Whether this whl package can be used to run the model on XPU.
Returns (bool): support xpu or not.
Examples:
.. code-block:: python
import paddle.fluid as fluid
support_xpu = fluid.is_compiled_with_xpu()
"""
return core.is_compiled_with_xpu()
def is_compiled_with_cuda():
"""
Whether this whl package can be used to run the model on GPU.
Returns (bool): `True` if CUDA is currently available, otherwise `False`.
Examples:
.. code-block:: python
import paddle
support_gpu = paddle.is_compiled_with_cuda()
"""
return core.is_compiled_with_cuda()
def cuda_places(device_ids=None):
"""
**Note**:
For multi-card tasks, please use `FLAGS_selected_gpus` environment variable to set the visible GPU device.
The next version will fix the problem with `CUDA_VISIBLE_DEVICES` environment variable.
This function creates a list of :code:`paddle.CUDAPlace` objects.
If :code:`device_ids` is None, environment variable of
:code:`FLAGS_selected_gpus` would be checked first. For example, if
:code:`FLAGS_selected_gpus=0,1,2`, the returned list would
be [paddle.CUDAPlace(0), paddle.CUDAPlace(1), paddle.CUDAPlace(2)].
If :code:`FLAGS_selected_gpus` is not set, all visible
gpu places would be returned according to the :code:`CUDA_VISIBLE_DEVICES` environment variable.
If :code:`device_ids` is not None, it should be the device
ids of GPUs. For example, if :code:`device_ids=[0,1,2]`,
the returned list would be
[paddle.CUDAPlace(0), paddle.CUDAPlace(1), paddle.CUDAPlace(2)].
Parameters:
device_ids (list or tuple of int, optional): list of GPU device ids.
Returns:
list of paddle.CUDAPlace: Created GPU place list.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
cuda_places = static.cuda_places()
"""
assert core.is_compiled_with_cuda(), \
"Not compiled with CUDA"
if device_ids is None:
device_ids = _cuda_ids()
elif not isinstance(device_ids, (list, tuple)):
device_ids = [device_ids]
return [core.CUDAPlace(dev_id) for dev_id in device_ids]
def xpu_places(device_ids=None):
"""
**Note**:
For multi-card tasks, please use `FLAGS_selected_xpus` environment variable to set the visible XPU device.
This function creates a list of :code:`paddle.XPUPlace` objects.
If :code:`device_ids` is None, environment variable of
:code:`FLAGS_selected_xpus` would be checked first. For example, if
:code:`FLAGS_selected_xpus=0,1,2`, the returned list would
be [paddle.XPUPlace(0), paddle.XPUPlace(1), paddle.XPUPlace(2)].
If :code:`FLAGS_selected_xpus` is not set, all visible
xpu places would be returned.
If :code:`device_ids` is not None, it should be the device
ids of XPUs. For example, if :code:`device_ids=[0,1,2]`,
the returned list would be
[paddle.XPUPlace(0), paddle.XPUPlace(1), paddle.XPUPlace(2)].
Parameters:
device_ids (list or tuple of int, optional): list of XPU device ids.
Returns:
list of paddle.XPUPlace: Created XPU place list.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
xpu_places = static.xpu_places()
"""
assert core.is_compiled_with_xpu(), \
"Not compiled with XPU"
if device_ids is None:
device_ids = _xpu_ids()
elif not isinstance(device_ids, (list, tuple)):
device_ids = [device_ids]
return [core.XPUPlace(dev_id) for dev_id in device_ids]
def cpu_places(device_count=None):
"""
This function creates a list of :code:`paddle.CPUPlace` objects, and returns the created list.
If :code:`device_count` is None, the device count would
be determined by environment variable :code:`CPU_NUM`.
If :code:`CPU_NUM` is not set, the default value is 1,
i.e. CPU_NUM=1.
:code:`CPU_NUM` indicates the number of devices used in the current task.
The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.
Parameters:
device_count (int, optional): device number. Default: None.
Returns:
list of paddle.CPUPlace: Created list of CPU places.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
cpu_places = static.cpu_places()
"""
if device_count is None:
device_count = _cpu_num()
return [core.CPUPlace()] * device_count
def cuda_pinned_places(device_count=None):
"""
This function creates a list of :code:`fluid.CUDAPinnedPlace` objects.
If :code:`device_count` is None, the device count would
be determined by environment variable :code:`CPU_NUM`.
If :code:`CPU_NUM` is not set, the default value is 1,
i.e. CPU_NUM=1.
:code:`CPU_NUM` indicates the number of devices used in the current task.
The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.
Parameters:
device_count (int, optional): device number. Default: None.
Returns:
list of fluid.CUDAPinnedPlace: Created list of CUDA pinned places.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cuda_pinned_places_cpu_num = fluid.cuda_pinned_places()
# or
cuda_pinned_places = fluid.cuda_pinned_places(1)
"""
assert core.is_compiled_with_cuda(), \
"Not compiled with CUDA"
if device_count is None:
device_count = len(_cuda_ids())
return [core.CUDAPinnedPlace()] * device_count
class NameScope(object):
def __init__(self, name="", parent=None):
self._children = dict()
self._name = name
self._parent = parent
def child(self, prefix):
if prefix not in self._children:
new_child = NameScope(prefix, self)
self._children[prefix] = [new_child]
else:
new_child = NameScope(prefix + "_%d" % len(self._children[prefix]),
self)
self._children[prefix].append(new_child)
return new_child
def parent(self):
return self._parent
def name(self):
return self._name
_name_scope = NameScope()
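# Illustrative behaviour of NameScope.child (consistent with the name_scope
# example below): calling child('s1') twice on the same parent yields scopes
# named 's1' and 's1_1', so repeated prefixes remain unique within a parent.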
@signature_safe_contextmanager
def name_scope(prefix=None):
"""
:api_attr: Static Graph
Generate hierarchical name prefix for the operators in Static Graph.
Note:
        This should only be used for debugging and visualization purposes.
        Don't use it for serious analysis such as graph/program transformations.
        Don't use it in dygraph, since it will cause a memory leak.
Args:
prefix(str, optional): prefix. Default is none.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
with paddle.static.name_scope("s1"):
a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
b = a + 1
with paddle.static.name_scope("s2"):
c = b * 1
with paddle.static.name_scope("s3"):
d = c / 1
with paddle.static.name_scope("s1"):
f = paddle.tensor.pow(d, 2.0)
with paddle.static.name_scope("s4"):
g = f - 1
# Op are created in the default main program.
for op in paddle.static.default_main_program().block(0).ops:
# elementwise_add is created in /s1/
if op.type == 'elementwise_add':
assert op.desc.attr("op_namescope") == '/s1/'
# elementwise_mul is created in '/s1/s2'
elif op.type == 'elementwise_mul':
assert op.desc.attr("op_namescope") == '/s1/s2/'
# elementwise_div is created in '/s1/s3'
elif op.type == 'elementwise_div':
assert op.desc.attr("op_namescope") == '/s1/s3/'
# elementwise_sum is created in '/s4'
elif op.type == 'elementwise_sub':
assert op.desc.attr("op_namescope") == '/s4/'
# pow is created in /s1_1/
elif op.type == 'pow':
assert op.desc.attr("op_namescope") == '/s1_1/'
"""
# TODO(panyx0718): Only [0-9a-z].
# in dygraph we don't need namescope since it will cause mem leak
if in_dygraph_mode():
yield
else:
assert prefix, "namescope prefix can not be empty."
global _name_scope
_name_scope = _name_scope.child(prefix)
try:
yield
finally:
_name_scope = _name_scope.parent()
def _full_name_scope():
global _name_scope
scope = _name_scope
name = ""
while scope:
name = scope.name() + "/" + name
scope = scope.parent()
return name
def generate_control_dev_var_name():
import random
return CONTROL_DEP_VAR_PREFIX + "@" + str(random.random())
def grad_var_name(var_name):
"""
Returns:
str: gradient name for a certain var name
"""
return var_name + GRAD_VAR_SUFFIX
def convert_np_dtype_to_dtype_(np_dtype):
"""
Convert the data type in numpy to the data type in Paddle
Args:
np_dtype(np.dtype): the data type in numpy.
Returns:
core.VarDesc.VarType: the data type in Paddle.
"""
dtype = np.dtype(np_dtype)
if dtype == np.float32:
return core.VarDesc.VarType.FP32
elif dtype == np.float64:
return core.VarDesc.VarType.FP64
elif dtype == np.float16:
return core.VarDesc.VarType.FP16
elif dtype == np.int32:
return core.VarDesc.VarType.INT32
elif dtype == np.int16:
return core.VarDesc.VarType.INT16
elif dtype == np.int64:
return core.VarDesc.VarType.INT64
elif dtype == np.bool:
return core.VarDesc.VarType.BOOL
elif dtype == np.uint16:
# since there is still no support for bfloat16 in NumPy,
# uint16 is used for casting bfloat16
return core.VarDesc.VarType.BF16
elif dtype == np.uint8:
return core.VarDesc.VarType.UINT8
elif dtype == np.int8:
return core.VarDesc.VarType.INT8
elif dtype == np.complex64:
return core.VarDesc.VarType.COMPLEX64
elif dtype == np.complex128:
return core.VarDesc.VarType.COMPLEX128
else:
raise ValueError("Not supported numpy dtype %s" % dtype)
def dtype_is_floating(dtype):
"""
Check the data type is floating or not.
Args:
dtype(np.dtype|core.VarDesc.VarType): data type.
Could be numpy format or Paddle format
Returns(bool): True if data type is a float value
"""
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
return dtype in [
core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32,
core.VarDesc.VarType.FP64
]
def _debug_string_(proto, throw_on_error=True):
"""
Get the debug string of a protobuf message. The message could be not
initialized.
Args:
proto(google.protobuf.message.Message): The protobuf message
throw_on_error(bool): True if raise an error when the protobuf message
is not initialized.
Returns(str): The debug string of the protobuf message
"""
error_fields = list()
if not proto.IsInitialized(error_fields) and throw_on_error:
raise ValueError("{0} are not initialized.\nThe message is {1}:\n".
format(error_fields, proto))
return proto.__str__()
def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR,
name=None,
shape=None,
dtype=None,
persistable=None,
**kwargs):
if dtype is not None:
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
return core.VarBase(dtype if dtype else core.VarDesc.VarType.FP32,
list(shape) if shape else [], name, type
if type else core.VarDesc.VarType.LOD_TENSOR, True
if persistable else False)
class VariableMetaClass(type):
@classmethod
def __instancecheck__(cls, instance):
t = type(instance)
if in_dygraph_mode():
return issubclass(t, core.VarBase)
else:
return issubclass(t, Variable)
class ParameterMetaClass(VariableMetaClass):
@classmethod
def __instancecheck__(cls, instance):
t = type(instance)
if in_dygraph_mode():
return issubclass(t, ParamBase)
else:
return issubclass(t, Parameter)
def _getitem_impl_(var, item):
"""
Slice the variable.
Args:
item(int/slice/tuple) : the index.
Returns:
Sliced variable
"""
if not isinstance(item, tuple):
item = [item]
decrease_axis = []
slice_axis = []
slice_start = []
slice_end = []
slice_step = []
use_strided_slice = False
reverse_axis = []
target_block = default_main_program().current_block()
def fill_constant(shape, value, force_cpu=False, out=None):
var.block.append_op(
type='fill_constant',
inputs={},
outputs={'Out': [out]},
attrs={
'shape': shape,
'dtype': out.dtype,
'value': float(value),
'force_cpu': force_cpu
})
out.stop_gradient = True
return out
for dim, slice_item in enumerate(item):
if isinstance(slice_item, slice):
start = slice_item.start
end = slice_item.stop
step = slice_item.step
if start is None and end is None and step is None:
continue
if step is None:
step = 1
if start is None and end is None:
assert (step == -1)
reverse_axis.append(dim)
continue
if start is None:
start = 0
if end is None:
end = 10000000
if step != 1:
use_strided_slice = True
slice_axis.append(dim)
slice_start.append(start)
slice_end.append(end)
slice_step.append(step)
else:
decrease_axis.append(dim)
slice_axis.append(dim)
slice_start.append(slice_item)
slice_step.append(1)
if isinstance(slice_item, Variable):
temp_1 = var.block.create_var(dtype=slice_item.dtype)
fill_constant([1], 1, force_cpu=True, out=temp_1)
temp_end = target_block.create_var(dtype=slice_item.dtype)
target_block.append_op(
type='elementwise_add',
inputs={'X': slice_item,
'Y': temp_1},
outputs={'Out': temp_end},
attrs={'axis': -1})
slice_end.append(temp_end)
else:
slice_end.append(slice_item + 1
if slice_item != -1 else 10000000)
def contain_var(one_list):
for ele in one_list:
if isinstance(ele, Variable):
return True
return False
def get_new_list_tensor(old_list):
new_list_tensor = []
for dim in old_list:
if isinstance(dim, Variable):
dim.stop_gradient = True
new_list_tensor.append(dim)
else:
assert (isinstance(dim, int))
temp_out = var.block.create_var(dtype='int32')
fill_constant([1], dim, force_cpu=True, out=temp_out)
new_list_tensor.append(temp_out)
return new_list_tensor
inputs = {'Input': [var]}
attrs = {
'axes': slice_axis,
'starts': [],
'ends': [],
'decrease_axis': decrease_axis
}
if (use_strided_slice == True):
attrs['strides'] = []
infer_flags = list(1 for i in range(len(slice_axis)))
# starts
if contain_var(slice_start):
inputs['StartsTensorList'] = get_new_list_tensor(slice_start)
for i, dim in enumerate(slice_start):
if isinstance(dim, Variable):
attrs['starts'].append(-1)
infer_flags[i] = -1
else:
attrs['starts'].append(dim)
else:
attrs['starts'] = slice_start
# ends
if contain_var(slice_end):
inputs['EndsTensorList'] = get_new_list_tensor(slice_end)
for i, dim in enumerate(slice_end):
if isinstance(dim, Variable):
attrs['ends'].append(-1)
infer_flags[i] = -1
else:
attrs['ends'].append(dim)
else:
attrs['ends'] = slice_end
# strides
if use_strided_slice == True:
if contain_var(slice_step):
inputs['StridesTensorList'] = get_new_list_tensor(slice_step)
for i, dim in enumerate(slice_step):
if isinstance(dim, Variable):
attrs['strides'].append(-1)
infer_flags[i] = -1
else:
attrs['strides'].append(dim)
else:
attrs['strides'] = slice_step
# infer_flags
attrs['infer_flags'] = infer_flags
out = var
if use_strided_slice == False and len(slice_axis) > 0:
# append slice_op here
slice_out_var = target_block.create_var(
name=unique_name.generate_with_ignorable_key(var.name + "_slice"),
dtype=var.dtype)
target_block.append_op(
type="slice",
inputs=inputs,
outputs={'Out': [slice_out_var]},
attrs=attrs)
out = slice_out_var
elif use_strided_slice == True and len(slice_axis) > 0:
strided_slice_out_var = target_block.create_var(
name=unique_name.generate_with_ignorable_key(var.name +
"_strided_slice"),
dtype=var.dtype)
target_block.append_op(
type="strided_slice",
inputs=inputs,
outputs={'Out': [strided_slice_out_var]},
attrs=attrs)
out = strided_slice_out_var
if len(reverse_axis) > 0:
reverse_out_var = target_block.create_var(
name=unique_name.generate_with_ignorable_key(var.name +
"_slice_reverse"),
dtype=var.dtype)
target_block.append_op(
type="reverse",
inputs={'X': out},
outputs={'Out': [reverse_out_var]},
attrs={'axis': reverse_axis})
out = reverse_out_var
return out
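# Rough sketch (illustrative, not from the upstream sources) of how indexing a
# static Variable `x` is lowered by _getitem_impl_ above:
#   x[2]       -> a `slice` op with axes=[0], starts=[2], ends=[3], decrease_axis=[0]
#   x[1:10:2]  -> a `strided_slice` op with starts=[1], ends=[10], strides=[2]
#   x[::-1]    -> a `reverse` op over axis 0
# When a start/end is itself a Variable, it is passed via StartsTensorList /
# EndsTensorList and the corresponding attr entry is set to -1.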
@six.add_metaclass(VariableMetaClass)
class Variable(object):
"""
**Notes**:
**The constructor of Variable should not be invoked directly.**
**In Static Graph Mode: Please use** `Block.create_var` **to create a Static variable which has no data until being feed.**
**In Dygraph Mode: Please use** :ref:`api_fluid_dygraph_to_variable` **to create a dygraph variable with real data**
In Fluid, every input and output of an OP is a variable. In most
cases, variables are used for holding different kinds of data or training
labels. A variable belongs to a :ref:`api_guide_Block_en` . All variable has its own name and
two variables in different :ref:`api_guide_Block_en` could have the same name.
There are many kinds of variables. Each kind of them has its own attributes
and usages. Please refer to the `framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_ for details.
    Most of a Variable's member variables can be set to None. This means
    it is not available or will be specified later.
Examples:
In Static Graph Mode:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
In `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ Mode:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
new_variable = fluid.dygraph.to_variable(np.arange(10))
"""
def __init__(self,
block,
type=core.VarDesc.VarType.LOD_TENSOR,
name=None,
shape=None,
dtype=None,
lod_level=None,
capacity=None,
persistable=None,
error_clip=None,
stop_gradient=False,
is_data=False,
need_check_feed=False,
belong_to_optimizer=False,
**kwargs):
self.block = block
if name is None:
name = unique_name.generate('_generated_var')
if dtype is not None:
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
self.belong_to_optimizer = belong_to_optimizer
self.error_clip = error_clip
is_new_var = False
name = cpt.to_text(name)
self.desc = self.block.desc.find_var(cpt.to_bytes(name))
if self.desc is None:
self.desc = self.block.desc.var(cpt.to_bytes(name))
is_new_var = True
if is_new_var:
self.desc.set_type(type)
elif self.desc.type() != type:
raise ValueError("Variable '{0}' has been created before. The "
"previous type is {1}, the new type is {2}. They"
" are not matched".format(self.name,
self.desc.type(), type))
if shape is not None:
if is_new_var:
self.desc.set_shape(shape)
else:
old_shape = self.shape
shape = tuple(shape)
if shape != old_shape:
raise ValueError(
"Variable '{0}' has been created before. The previous "
"shape is {1}, the new shape is {2}. They are not "
"matched.".format(self.name, old_shape, shape))
if dtype is not None:
if is_new_var:
self.desc.set_dtype(dtype)
else:
old_dtype = self.dtype
if dtype != old_dtype:
raise ValueError("Variable '{0}' has been created before. "
"The previous data type is {1}, the new "
"data type is {2}. They are not "
"matched.".format(self.name, old_dtype,
dtype))
if lod_level is not None:
if is_new_var:
self.desc.set_lod_level(lod_level)
else:
if lod_level != self.lod_level:
raise ValueError("Variable '{0}' has been created before. "
"The previous lod_level is {1}, the new "
"lod_level is {2}. They are not "
"matched".format(self.name, self.lod_level,
lod_level))
if persistable is not None:
if is_new_var:
self.desc.set_persistable(persistable)
else:
if persistable != self.persistable:
raise ValueError(
"Variable '{0}' has been created before."
"The previous persistable is {1}, the new "
"persistable is {2}. They are not matched".format(
self.name, self.persistable, persistable))
if need_check_feed and is_new_var:
self.desc.set_need_check_feed(need_check_feed)
if capacity is not None:
if is_new_var:
self.desc.set_capacity(capacity)
else:
# TODO(abhinavarora) : Compare with set capacity once,
# get_capacity is implemented
pass
self.block.vars[name] = self
self.op = None
self._stop_gradient = stop_gradient
self.is_data = is_data
@fake_interface_only
def detach(self):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Returns a new Variable, detached from the current graph.
Returns:
( :ref:`api_guide_Variable_en` | dtype is same as current Variable): The detached Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
linear = Linear(32, 64)
data = to_variable(data)
x = linear(data)
y = x.detach()
"""
pass
@fake_interface_only
def numpy(self):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
        Returns a numpy array that shows the value of the current :ref:`api_guide_Variable_en`
Returns:
ndarray: The numpy value of current Variable.
Returns type:
ndarray: dtype is same as current Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
linear = Linear(32, 64)
data = to_variable(data)
x = linear(data)
print(x.numpy())
"""
pass
@fake_interface_only
def set_value(self, value):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Set a new value for this Variable.
Args:
value (Variable|np.ndarray): the new value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import Linear
import numpy as np
data = np.ones([3, 1024], dtype='float32')
with fluid.dygraph.guard():
linear = fluid.dygraph.Linear(1024, 4)
t = to_variable(data)
linear(t) # call with default weight
custom_weight = np.random.randn(1024, 4).astype("float32")
linear.weight.set_value(custom_weight) # change existing weight
out = linear(t) # call with different weight
"""
pass
@fake_interface_only
def backward(self, retain_graph=False):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Run backward of current Graph which starts from current Tensor.
Args:
retain_graph(bool, optional): If False, the graph used to compute grads will be freed. If you would
like to add more ops to the built graph after calling this method( :code:`backward` ), set the parameter
                :code:`retain_graph` to True, then the grads will be retained. Thus, setting it to False is much more memory-efficient.
Defaults to False.
Returns:
NoneType: None
Examples:
.. code-block:: python
import numpy as np
import paddle
paddle.disable_static()
x = np.ones([2, 2], np.float32)
inputs = []
for _ in range(10):
tmp = paddle.to_tensor(x)
                    # if we don't set tmp's stop_gradient to False, then all paths to the loss will have no gradient, since
                    # nothing along them would require a gradient.
tmp.stop_gradient=False
inputs.append(tmp)
ret = paddle.add_n(inputs)
loss = paddle.sum(ret)
loss.backward()
"""
pass
@fake_interface_only
def gradient(self):
"""
**Notes**:
**This API is ONLY available in Dygraph mode**
Get the Gradient of Current Variable
Returns:
ndarray or tuple of ndarray: if Variable's type is LoDTensor, return numpy value of the gradient of current Variable, if Variable's type is SelectedRows, return tuple of ndarray, first element of tuple is numpy value of the gradient of current Variable, second element of tuple is numpy value of the rows of current Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# example1: return ndarray
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
loss2.backward()
print(loss2.gradient())
# example2: return tuple of ndarray
with fluid.dygraph.guard():
embedding = fluid.dygraph.Embedding(
size=[20, 32],
param_attr='emb.w',
is_sparse=True)
x_data = np.arange(12).reshape(4, 3).astype('int64')
x_data = x_data.reshape((-1, 3, 1))
x = fluid.dygraph.base.to_variable(x_data)
out = embedding(x)
out.backward()
print(embedding.weight.gradient())
"""
pass
@fake_interface_only
def clear_gradient(self):
"""
**Notes**:
**1. This API is ONLY available in Dygraph mode**
**2. Use it only when the Variable has a gradient; normally we use this for Parameters, since other temporary Variables will be deleted by Python's GC**
Clear (set to ``0`` ) the Gradient of Current Variable
Returns: None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
loss2.backward()
print(loss2.gradient())
loss2.clear_gradient()
print("After clear {}".format(loss2.gradient()))
"""
pass
def __str__(self):
return self._to_readable_code()
def _to_readable_code(self):
"""
Get readable debug string of Variable.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Returns:
string: The formatted Variable string.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
cur_program = static.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print(new_variable._to_readable_code())
"""
# VarType.LOD_TENSOR -> LOD_TENSOR
type_str = str(self.type).split('.')[1]
if self.type == core.VarDesc.VarType.SELECTED_ROWS or self.type == core.VarDesc.VarType.LOD_TENSOR:
dtype_str = str(self.dtype).split('.')[1]
var_str = "{name} : {type}.shape{shape}.dtype({dtype}).stop_gradient({stop_gradient})".\
format(name=self.name, type=type_str, shape=self.shape,
dtype=dtype_str, stop_gradient=self.stop_gradient)
else:
var_str = "{name} : {type})".\
format(name=self.name, type=type_str)
if type(self) == Parameter:
if self.trainable:
var_str = "trainable param " + var_str
else:
var_str = "param " + var_str
else:
var_str = "var " + var_str
if self.persistable:
var_str = "persist " + var_str
return var_str
def to_string(self, throw_on_error, with_details=False):
"""
Get debug string.
Args:
throw_on_error (bool): True if raise an exception when self is not initialized.
with_details (bool): more details about variables and parameters (e.g. trainable, optimize_attr, ...) will be printed when with_details is True. Default value is False;
Returns:
str: The debug string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print(new_variable.to_string(True))
print("=============with detail===============")
print(new_variable.to_string(True, True))
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
protostr = self.desc.serialize_to_string()
proto = framework_pb2.VarDesc.FromString(six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
if with_details:
additional_attr = ("error_clip", "stop_gradient")
for attr_name in additional_attr:
res_str += "%s: %s\n" % (attr_name,
cpt.to_text(getattr(self, attr_name)))
return res_str
__repr__ = __str__
@property
def stop_gradient(self):
"""
Indicating if we stop gradient from current Variable
**Notes: This Property defaults to** ``True`` **in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, while a Parameter's default value is False. However, in Static Graph Mode every Variable's default stop_gradient value is** ``False``
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
with fluid.dygraph.guard():
value0 = np.arange(26).reshape(2, 13).astype("float32")
value1 = np.arange(6).reshape(2, 3).astype("float32")
value2 = np.arange(10).reshape(2, 5).astype("float32")
linear = fluid.Linear(13, 5, dtype="float32")
linear2 = fluid.Linear(3, 3, dtype="float32")
a = fluid.dygraph.to_variable(value0)
b = fluid.dygraph.to_variable(value1)
c = fluid.dygraph.to_variable(value2)
out1 = linear(a)
out2 = linear2(b)
out1.stop_gradient = True
out = fluid.layers.concat(input=[out1, out2, c], axis=1)
out.backward()
assert linear.weight.gradient() is None
assert (out1.gradient() == 0).all()
"""
return self._stop_gradient
@stop_gradient.setter
def stop_gradient(self, s):
self._stop_gradient = s
@property
def persistable(self):
"""
Indicating if the current Variable should be long-term alive
**Notes: This Property will be deprecated and this API is just to help users understand the concept**
**1. All Variable's persistable is** ``False`` **except Parameters.**
**2. In** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, this property should not be changed**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("persistable of current Var is: {}".format(new_variable.persistable))
"""
return self.desc.persistable()
@persistable.setter
def persistable(self, p):
self.desc.set_persistable(p)
@property
def name(self):
"""
Indicating name of current Variable
**Notes: If two or more Variables share the same name in the same** :ref:`api_guide_Block_en` **, it means these Variables will share content in non-** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode. This is how we achieve Parameter sharing**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("name of current Var is: {}".format(new_variable.name))
"""
return cpt.to_text(self.desc.name())
@property
def grad_name(self):
"""
Indicating name of the gradient Variable of current Variable.
**Notes: This is a read-only property. It simply derives the name of the
gradient Variable from a naming convention but doesn't guarantee that
the gradient exists.**
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name="x", shape=[-1, 23, 48], dtype='float32')
print(x.grad_name) # output is "x@GRAD"
"""
return self.name + "@GRAD"
@name.setter
def name(self, new_name):
self.desc.set_name(new_name)
@property
def shape(self):
"""
Indicating shape of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("shape of current Var is: {}".format(new_variable.shape))
"""
# convert to tuple, make it as same as numpy API.
return tuple(self.desc.shape())
@property
def dtype(self):
"""
Indicating data type of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("Dtype of current Var is: {}".format(new_variable.dtype))
"""
return self.desc.dtype()
@property
def lod_level(self):
"""
Indicating ``LoD`` info of current Variable, please refer to :ref:`api_fluid_LoDTensor_en` to check the meaning
of ``LoD``
**Notes**:
**1. This is a read-only property**
**2. Don't support this property in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, its value should be** ``0`` (int)
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("LoD Level of current Var is: {}".format(new_variable.lod_level))
"""
if self.type == core.VarDesc.VarType.SELECTED_ROWS:
raise Exception("SelectedRows DO NOT supprt lod")
return self.desc.lod_level()
@property
def type(self):
"""
Indicating Type of current Variable
**Notes: This is a read-only property**
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_variable = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
print("Type of current Var is: {}".format(new_variable.type))
"""
return self.desc.type()
def clone(self):
"""
Returns a new static Variable, which is the clone of the original static
Variable. It remains in the current graph, that is, the cloned Variable
provides gradient propagation. Calling ``out = tensor.clone()`` is the same
as ``out = assign(tensor)`` .
Returns:
Variable: The cloned Variable.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
# create a static Variable
x = paddle.static.data(name='x', shape=[3, 2, 1])
# create a cloned Variable
y = x.clone()
"""
output = self.block.create_var(
name=unique_name.generate_with_ignorable_key(self.name + "_clone"),
dtype=self.dtype,
type=self.type,
persistable=self.persistable,
stop_gradient=self.stop_gradient)
self.block.append_op(
type='assign', inputs={'X': [self]}, outputs={'Out': [output]})
return output
def _set_error_clip(self, error_clip):
"""
Set the error_clip.
Args:
error_clip(BaseErrorClipAttr) : The new error_clip.
Returns:
None
"""
self.error_clip = error_clip
def _set_info(self, key, value):
"""
Set key-value information for this variable.
Args:
key(str): Key for this information.
value(object): The value associated to the key.
Returns:
None
"""
if not hasattr(self, "_info"):
self._info = {}
self._info[key] = value
def _get_info(self, key):
"""
Get the information of this variable corresponding to key.
Args:
key(str): Key for this information.
Returns:
object
"""
if hasattr(self, "_info") and key in self._info:
return self._info[key]
return None
def _slice_indices(self, slice, length):
"""
Reference implementation for the slice.indices method.
"""
# Compute step and length as integers.
step = 1 if slice.step is None else slice.step
# Raise ValueError for negative length or zero step.
if length < 0:
raise ValueError("length should not be negative")
if step == 0:
raise ValueError("slice step can not be zero")
# Find lower and upper bounds for start and stop.
lower = -1 if step < 0 else 0
upper = length - 1 if step < 0 else length
# Compute start.
if slice.start is None:
start = upper if step < 0 else lower
else:
start = slice.start
start = max(start + length, lower) if start < 0 else min(start,
upper)
# Compute stop.
if slice.stop is None:
stop = lower if step < 0 else upper
else:
stop = slice.stop
stop = max(stop + length, lower) if stop < 0 else min(stop, upper)
return start, stop, step
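# A minimal sketch of what _slice_indices computes; it mirrors Python's
# built-in slice.indices, and the concrete numbers below are illustrative:
#   self._slice_indices(slice(1, 100, 2), 10)      -> (1, 10, 2)
#   self._slice_indices(slice(None, None, -2), 10) -> (9, -1, -2)
# i.e. the same triples that slice(1, 100, 2).indices(10) and
# slice(None, None, -2).indices(10) would return.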
def _detectEllipsis(self, item):
has_ellipsis = False
start = 0
end = len(self.shape)
for index, o in enumerate(item):
if o is Ellipsis:
if has_ellipsis:
raise ValueError("Index can have one ellipsis only.")
has_ellipsis = True
start = index
else:
if has_ellipsis:
end = index
return has_ellipsis, start, end
def _reconstructSliceinfo(self, item):
has_ellipsis, start, end = self._detectEllipsis(item)
if has_ellipsis:
newitem = []
for i in range(start):
newitem.append(item[i])
for i in range(start, end):
newitem.append(slice(None, None, None))
for i in range(end, len(item)):
newitem.append(item[i])
return newitem
else:
return None
def _detectContinuesSlice(self, item):
starts = []
ends = []
for index, o in enumerate(item):
if isinstance(o, int):
start = int(o)
if (start > 0 and start >= self.shape[index]) \
or (start < 0 and (start + self.shape[index]) < 0):
raise IndexError("invalid index")
start = max(start + self.shape[index], 0) if start < 0 else min(
start, self.shape[index])
starts.append(start)
ends.append(start + 1)
elif isinstance(o, slice):
start, stop, step = self._slice_indices(o, self.shape[index])
if step == 1 or step == -1:
starts.append(start)
ends.append(stop)
else:
return False, None
else:
raise IndexError("Valid index accept int or slice or ellipsis")
return True, [starts, ends]
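# A minimal sketch of the expected result (the shape and index values are
# illustrative): for a Variable with self.shape == (3, 4) and
# item == (1, slice(0, 2)), this method returns
#   (True, [[1, 0], [2, 2]])
# i.e. per-axis starts [1, 0] and ends [2, 2]. A slice whose step is not
# 1 or -1 makes it return (False, None) instead.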
def _cloneVar(self, copy=False):
if not copy:
return self.block.create_var(
name=unique_name.generate_with_ignorable_key(self.name),
dtype=self.dtype)
else:
return self
def _sliceVar(self, axes, starts, ends):
new_var = self._cloneVar()
self.block.append_op(
type="slice",
inputs={'Input': [self]},
outputs={'Out': [new_var]},
attrs={'axes': axes,
'starts': starts,
'ends': ends})
return new_var
def _concatVar(self, inputs, axis):
new_var = self._cloneVar()
self.block.append_op(
type="concat",
inputs={'X': inputs},
outputs={'Out': [new_var]},
attrs={'axis': axis, })
return new_var
def _sliceAndConcatVar(self, item, axis):
if isinstance(item, slice):
if self.shape[axis] < 0:
return self._cloneVar(True)
start, stop, step = self._slice_indices(item, self.shape[axis])
if step == 1:
return self._sliceVar([axis], [start], [stop])
else:
vars = []
if step > 0:
while start < stop:
vars.append(
self._sliceVar([axis], [start], [start + 1]))
start += step
else:
while start > stop:
vars.append(
self._sliceVar([axis], [start], [start + 1]))
start += step
return self._concatVar(vars, axis)
elif isinstance(item, int):
if self.shape[axis] < 0:
return self._cloneVar(True)
index = int(item)
if (index > 0 and index >= self.shape[axis]) \
or (index < 0 and (index + self.shape[axis]) < 0):
raise IndexError("invalid index")
return self._sliceVar([axis], [index], [index + 1])
else:
raise IndexError("Valid index accept int or slice or tuple")
def __getitem__(self, item):
return _getitem_impl_(self, item)
def __setitem__(self, item, value):
inputs = {'Input': self}
# 1. Parse item
if not isinstance(item, tuple):
item = [item]
axes = []
starts = []
ends = []
steps = []
max_integer = sys.maxsize
def replace_ellipsis(item):
# Use slice(None) to replace Ellipsis.
# For var, var.shape = [3,4,5,6]
#
# var[..., 1:2] -> var[:, :, :, 1:2]
# var[0, ...] -> var[0]
# var[0, ..., 1:2] -> var[0, :, :, 1:2]
item = list(item)
# Remove Variables so they do not interfere when counting Ellipsis
item_remove_var = [
ele for ele in item if not isinstance(ele, Variable)
]
ell_count = item_remove_var.count(Ellipsis)
if ell_count == 0:
return item
elif ell_count > 1:
raise IndexError(
"An index can only have a single ellipsis ('...')")
ell_idx = item.index(Ellipsis)
if ell_idx == len(item) - 1:
return item[:-1]
else:
item[ell_idx:ell_idx + 1] = [slice(None)] * (
len(self.shape) - len(item) + 1)
return item
item = replace_ellipsis(item)
for dim, slice_item in enumerate(item):
if isinstance(slice_item, slice):
start = slice_item.start
end = slice_item.stop
step = slice_item.step
if start is None and end is None and step is None:
continue
step = 1 if step is None else step
# TODO: support cases when step < 1
if not isinstance(step, Variable) and step == 0:
raise ValueError(
"When assign a value to a paddle.Tensor, step can not be 0, "
"but received step is {}.".format(step))
if isinstance(step, Variable) and (start is None or
end is None):
raise ValueError(
"When assign a value to a paddle.Tensor, it's not supported that "
"the start or end is None when the type of step is paddle.Tensor."
)
if start is None:
start = 0 if step > 0 else max_integer
if end is None:
end = max_integer if step > 0 else (0 - max_integer)
else:
start = slice_item
end = slice_item + 1 if slice_item != -1 else max_integer
step = 1
axes.append(dim)
starts.append(start)
ends.append(end)
steps.append(step)
attrs = {'axes': axes, 'starts': starts, 'ends': ends, 'steps': steps}
from .layers import utils
if utils._contain_var(starts):
inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts)
del attrs['starts']
if utils._contain_var(ends):
inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)
del attrs['ends']
if utils._contain_var(steps):
inputs['StepsTensorList'] = utils._convert_to_tensor_list(steps)
del attrs['steps']
# 2. Parse value
dtype = self.dtype
attrs['dtype'] = dtype
from .data_feeder import convert_dtype
# 2.1 value is an integer or float
if isinstance(value, (int, float)):
value = np.array([value]).astype(convert_dtype(dtype))
# 2.2 value is a np.ndarray
if isinstance(value, np.ndarray):
shape = list(value.shape)
if dtype == core.VarDesc.VarType.BOOL:
value_name = "bool_values"
values = [bool(v) for v in value.flat]
elif dtype == core.VarDesc.VarType.FP32:
value_name = "fp32_values"
values = [float(v) for v in value.flat]
elif dtype == core.VarDesc.VarType.FP64:
value_name = "fp64_values"
values = [float(v) for v in value.flat]
elif dtype == core.VarDesc.VarType.INT32:
value_name = "int32_values"
values = [int(v) for v in value.flat]
elif dtype == core.VarDesc.VarType.INT64:
value_name = "int64_values"
values = [int(v) for v in value.flat]
else:
raise TypeError(
"When assign a numpy.ndarray, integer or float to a paddle.Tensor, "
"the data type of the paddle.Tensor must be bool, float32, int32 or int64, but "
"received %s." % convert_dtype(dtype))
attrs[value_name] = values
attrs["shape"] = shape
elif isinstance(value, Variable):
inputs["ValueTensor"] = value
else:
raise TypeError(
"Only support to assign an integer, float, numpy.ndarray or "
"paddle.Tensor to a paddle.Tensor, but received {}".format(
type(value)))
self.block.append_op(
type="set_value", inputs=inputs, outputs={'Out': self}, attrs=attrs)
return self
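# A minimal static-graph sketch of __setitem__ (the data name 'x' and the
# shape below are illustrative, not part of this module):
#   import paddle
#   paddle.enable_static()
#   x = paddle.static.data(name='x', shape=[3, 4], dtype='float32')
#   x[0:2, :] = 1.0
# The assignment appends a `set_value` op with axes=[0], starts=[0],
# ends=[2], steps=[1] and fp32_values=[1.0]; the full-dimension slice on
# axis 1 is skipped because its start, stop and step are all None.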
def get_all_op_protos():
"""
Get all registered op proto from PaddlePaddle C++ end.
Returns:
list: list of OpProto.
"""
protostrs = core.get_all_op_protos()
ret_values = []
for pbstr in protostrs:
op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))
ret_values.append(op_proto)
return ret_values
class OpProtoHolder(object):
"""
A global variable to hold all OpProtos from C++ as a map
"""
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def __init__(self):
assert not hasattr(
self.__class__,
'_instance'), 'Please use `instance()` to get OpProtoHolder object!'
op_protos = get_all_op_protos()
self.op_proto_map = {}
for proto in op_protos:
self.op_proto_map[proto.type] = proto
def get_op_proto(self, type):
"""
Get OpProto by a type string.
Args:
type(str): The type that operator registered in C++ side.
Returns(framework_pb2.OpProto): The OpProto
"""
if type not in self.op_proto_map:
raise ValueError("Operator \"%s\" has not been registered." % type)
return self.op_proto_map[type]
def update_op_proto(self):
op_protos = get_all_op_protos()
custom_op_names = []
for proto in op_protos:
if proto.type not in self.op_proto_map:
self.op_proto_map[proto.type] = proto
custom_op_names.append(proto.type)
return custom_op_names
@staticmethod
def generated_op_attr_names():
return {
core.op_proto_and_checker_maker.kOpRoleAttrName(),
core.op_proto_and_checker_maker.kOpRoleVarAttrName(),
core.op_proto_and_checker_maker.kOpNameScopeAttrName(),
core.op_proto_and_checker_maker.kOpCreationCallstackAttrName(),
core.op_proto_and_checker_maker.kOpDeviceAttrName()
}
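# A minimal lookup sketch for OpProtoHolder (the op type 'sum' is only an
# example of an operator registered on the C++ side):
#   proto = OpProtoHolder.instance().get_op_proto('sum')
#   print(proto.type)                          # 'sum'
#   print([inp.name for inp in proto.inputs])  # input parameter names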
class Operator(object):
"""
In Fluid, all operations are represented by Operators, and an Operator
is regarded as a built-in instruction in a Block. Users can use these
built-in instructions to describe their neural network.
Args:
block(Block): The block has the current operator.
desc(core.OpDesc): The protobuf description of Operator.
type(str): The type of operator. Default None.
inputs(dict): The input of this Operator. It is a dictionary; for every
element, the key is the input parameter name, and the value is a list of
variables. Default None.
outputs(dict): The output of this Operator. It is a dictionary; for
every element, the key is the output parameter name, and the value is a list
of variables. Default None.
attrs(dict): The attributes of this Operator. It is a dictionary; for
every element, the key is the attribute name, and the value is the attribute value.
The attribute type should be the same as the type registered on the C++ side.
Default None.
Returns:
Operator: The initialized Operator.
Raises:
ValueError: If the passed inputs, outputs and attrs don't match those
registered for the Operator on the C++ side.
Notes:
The constructor of operator should not be invoked directly. Use
Block.append_op or Block._prepend_op instead.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
# var1 += var2 + var3
cur_block.append_op(type="sum",
inputs={"X": [var1, var2, var3]},
outputs={"Out": [var1]})
"""
OP_WITHOUT_KERNEL_SET = {
'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',
'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',
'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',
'gen_bkcl_id', 'c_gen_bkcl_id', 'gen_nccl_id', 'c_gen_nccl_id',
'c_comm_init', 'c_sync_calc_stream', 'c_sync_comm_stream',
'queue_generator', 'dequeue', 'enqueue', 'heter_listen_and_serv',
'c_wait_comm', 'c_wait_compute'
}
def __init__(self,
block,
desc,
type=None,
inputs=None,
outputs=None,
attrs=None):
if in_dygraph_mode():
if type is None:
raise ValueError(
"`type` to initialized an Operator can not be None.")
self._type = type
self.attrs = attrs if attrs else {}
else:
self.block = block
self.desc = desc
# note: not add self.attrs here:
# https://github.com/PaddlePaddle/Paddle/pull/12583#pullrequestreview-145093173
op_attrs = attrs
if op_attrs is None:
op_attrs = dict()
del attrs
op_maker = core.op_proto_and_checker_maker
if op_maker.kOpRoleAttrName() not in op_attrs:
op_attrs[op_maker.kOpRoleAttrName(
)] = self.block.program._op_role
role_var_name = op_maker.kOpRoleVarAttrName()
if len(self.block.program.
_op_role_var) != 0 and role_var_name not in op_attrs:
op_attrs[role_var_name] = self.block.program._op_role_var
if role_var_name in op_attrs and len(op_attrs[role_var_name]) == 0:
del op_attrs[role_var_name]
if len(self.desc.type()) != 0:
return
if type is None:
raise ValueError(
"`type` to initialized an Operator can not be None.")
else:
callstack_var_name = op_maker.kOpCreationCallstackAttrName()
op_attrs[callstack_var_name] = []
for frame in traceback.extract_stack():
op_attrs[callstack_var_name].append(
' File "{}", line {}, in {}'.format(frame[0], frame[1],
frame[2]))
op_attrs[callstack_var_name].append(' {}'.format(frame[
3]))
self.desc.set_type(type)
proto = OpProtoHolder.instance().get_op_proto(type)
namescope_var_name = op_maker.kOpNameScopeAttrName()
op_attrs[namescope_var_name] = _full_name_scope()
# set device for op with kernels, give warning for op without kernels
# when force_cpu and device_guard are used at the same time, a warning will be given.
# TODO(zhangting2020): when force_cpu is removed, clear warning below.
if _current_device is not None:
if self._has_kernel(type):
op_device = op_maker.kOpDeviceAttrName()
op_attrs[op_device] = _current_device
else:
warnings.warn("The Op(%s) is not support to set device." %
type)
if 'force_cpu' in op_attrs:
if (type == 'less_than' and op_attrs['force_cpu'] is not None
) or op_attrs['force_cpu'] != False:
warnings.warn(
"The Attr(force_cpu) of Op(%s) will be deprecated in the future, "
"please use 'device_guard' instead. 'device_guard' has higher priority when they are "
"used at the same time." % type)
def find_name(var_list, name):
for var_name in var_list:
if var_list[var_name] is not None and var_name == name:
return True
return False
if inputs is not None:
for in_proto in proto.inputs:
found = find_name(inputs, in_proto.name)
assert found or in_proto.dispensable, "Input {} not found".format(
in_proto.name)
if found:
in_args = inputs[in_proto.name]
if not isinstance(in_args, (list, tuple)):
in_args = [in_args]
if not in_proto.duplicable and len(in_args) > 1:
raise ValueError(
"Input %s expects only one input, but %d are given."
% (in_proto.name, len(in_args)))
in_arg_names = []
for index, arg in enumerate(in_args):
if isinstance(arg, six.string_types):
in_arg_names.append(arg)
elif isinstance(arg, six.binary_type):
in_arg_names.append(arg.decode())
elif isinstance(arg, (Variable, core.VarBase)):
in_arg_names.append(cpt.to_text(arg.name))
else:
raise TypeError(
"The type of '%s' in operator %s should be "
"one of [basestring(), str, Varibale] in python2, "
"or one of [str, bytes, Variable] in python3."
"but received : %s" %
(in_proto.name, type, arg))
self.desc.set_input(in_proto.name, in_arg_names)
else:
self.desc.set_input(in_proto.name, [])
if outputs is not None:
for m in proto.outputs:
if (m.name not in outputs) and m.dispensable:
continue
if not ((m.name in outputs) or m.dispensable):
raise ValueError(("Incorrect setting for output(s) of "
"operator \"%s\", should set: [%s].")
% (type, m.name))
for out_proto in proto.outputs:
if out_proto.name not in outputs:
continue
out_args = outputs[out_proto.name]
if not isinstance(out_args, list):
out_args = [out_args]
if not out_proto.duplicable and len(out_args) > 1:
raise ValueError(
"Output %s expects only one output, but %d are given."
% (out_proto.name, len(out_args)))
out_arg_names = []
for arg in out_args:
if isinstance(arg, six.string_types):
out_arg_names.append(arg)
else:
out_arg_names.append(cpt.to_text(arg.name))
# TODO(minqiyang): could we remove variable's op in static mode?
if not in_dygraph_mode():
if isinstance(arg, six.string_types):
block.var(arg).op = self
else:
arg.op = self
self.desc.set_output(out_proto.name, out_arg_names)
if op_attrs is not None:
if not isinstance(op_attrs, dict):
raise TypeError("'attrs' should be a dict.")
for attr in proto.attrs:
attr_name = attr.name
if (attr_name not in op_attrs) or (
op_attrs[attr_name] is None):
continue
attr_val = op_attrs[attr_name]
self._update_desc_attr(attr_name, attr_val)
self.desc.check_attrs()
if self._has_kernel(type):
self.desc.infer_var_type(self.block.desc)
self.desc.infer_shape(self.block.desc)
def _has_kernel(self, op_type):
return op_type not in self.OP_WITHOUT_KERNEL_SET
def to_string(self, throw_on_error):
"""
Get debug string.
Args:
throw_on_error(bool): Whether to raise exception if self is not
initialized.
Returns:
str: The debug string.
"""
protostr = self.desc.serialize_to_string()
proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))
return _debug_string_(proto, throw_on_error)
def _to_readable_code(self, skip_op_callstack=True):
"""
Get readable debug string of Operator.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Args:
skip_op_callstack(bool): whether to skip parsing Operator's attribute
op_callstack, default value is True
Returns:
string: The formatted Operator string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
new_op = cur_block.append_op(type="abs",
inputs={"X": [var]},
outputs={"Out": [var]})
print(new_op._to_readable_code())
"""
assert isinstance(
skip_op_callstack, bool
), "skip_op_callstack parameter's type must be bool, but received {}".format(
type(skip_op_callstack))
outputs_str = "{"
for i in range(0, len(self.output_names)):
outputs_str += "{name}=".format(name=self.output_names[i])
o = self.output(self.output_names[i])
outputs_str += "{value}".format(value=o)
if i != len(self.output_names) - 1:
outputs_str += ", "
outputs_str += "}"
inputs_str = "{"
for i in range(0, len(self.input_names)):
inputs_str += "{name}=".format(name=self.input_names[i])
o = self.input(self.input_names[i])
inputs_str += "{value}".format(value=o)
if i != len(self.input_names) - 1:
inputs_str += ", "
inputs_str += "}"
attr_names = sorted(self.attr_names)
attrs_str = ""
for i in range(0, len(attr_names)):
name = attr_names[i]
if skip_op_callstack and name == "op_callstack":
continue
attr_type = self.desc.attr_type(name)
if attr_type == core.AttrType.BLOCK:
a = "{name} = block[{value}]".format(
name=name, type=attr_type, value=self._block_attr_id(name))
attrs_str += a
if i != len(attr_names) - 1:
attrs_str += ", "
continue
if attr_type == core.AttrType.BLOCKS:
a = "{name} = blocks{value}".format(
name=name,
type=attr_type,
value=self._blocks_attr_ids(name))
attrs_str += a
if i != len(attr_names) - 1:
attrs_str += ", "
continue
a = "{name} = {value}".format(
name=name, type=attr_type, value=self.desc.attr(name))
attrs_str += a
if i != len(attr_names) - 1:
attrs_str += ", "
if outputs_str != "{}":
op_str = "{outputs} = {op_type}(inputs={inputs}, {attrs})".\
format(outputs=outputs_str, op_type=self.type,
inputs=inputs_str, attrs=attrs_str)
else:
op_str = "{op_type}(inputs={inputs}, {attrs})".\
format(op_type=self.type, inputs=inputs_str, attrs=attrs_str)
return op_str
def __str__(self):
return self._to_readable_code()
__repr__ = __str__
@property
def type(self):
return self.desc.type()
def input(self, name):
r"""
Get the input arguments according to the input parameter name.
Args:
name(str): The input parameter name.
Returns:
list: return the list of argument names that are associated with \
the specific parameter name.
"""
return self.desc.input(name)
def _rename_input(self, old_name, new_name):
"""
Rename the `old_name` to `new_name`.
Args:
old_name(str): The old name of the Operator's input.
new_name(str): The new name of the Operator's input.
Returns:
None
"""
self.desc._rename_input(old_name, new_name)
def _rename_output(self, old_name, new_name):
"""
Rename the `old_name` to `new_name`.
Args:
old_name(str): The old name of the Operator's output.
new_name(str): The new name of the Operator's output.
Returns:
None
"""
self.desc._rename_output(old_name, new_name)
@property
def input_names(self):
return self.desc.input_names()
@property
def input_arg_names(self):
return self.desc.input_arg_names()
@property
def output_arg_names(self):
return self.desc.output_arg_names()
def output(self, name):
r"""
Get output arguments by the output parameter name.
Args:
name(str): The output parameter name.
Returns:
list: return the list of argument names associated with \
the specific parameter name.
"""
return self.desc.output(name)
@property
def output_names(self):
return self.desc.output_names()
@property
def idx(self):
for i, op in enumerate(self.block.ops):
if op == self:
return i
raise ValueError(
"Can't find op itself in it's block. It could be a bug of Paddle.")
def has_attr(self, name):
"""
Whether this Operator has the attribute with name or not.
Args:
name(str): the attribute name.
Returns:
bool: True if has this attribute.
"""
return self.desc.has_attr(name)
def attr_type(self, name):
"""
Get the type of attribute by attribute's name.
Args:
name(str): the attribute name.
Returns:
core.AttrType: the attribute type.
"""
return self.desc.attr_type(name)
def _set_attr(self, name, val):
"""
Set the value of attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
Raises:
ValueError: If the type of value doesn't match with desc.attr_type(name).
"""
self._update_desc_attr(name, val)
def _remove_attr(self, name):
self.desc.remove_attr(name)
def _update_desc_attr(self, name, val):
"""
Update the value of desc's attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
Raises:
ValueError: If the type of value doesn't match with desc.attr_type(name).
"""
if isinstance(val, Block):
self.desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and all(
isinstance(v, Block) for v in val):
self.desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, core.BlockDesc) or \
isinstance(val, core.ProgramDesc):
self.desc.set_serialized_attr(name, val.serialize_to_string())
else:
self.desc._set_attr(name, val)
@property
def attr_names(self):
return self.desc.attr_names()
def attr(self, name):
"""
Get the attribute by name.
Args:
name(str): the attribute name.
Returns:
bool|int|str|float|list: The attribute value. The return value
can be any valid attribute type.
"""
return self.desc.attr(name)
def _block_attr_id(self, name):
"""
Get the block attribute's id by name.
Args:
name(str): the attribute name.
Returns:
int: the block index.
"""
return self.desc._block_attr_id(name)
def _block_attr(self, name):
"""
Get the block attribute by name.
Args:
name(str): the attribute name.
Returns:
block: the block attribute.
"""
id = self._block_attr_id(name)
assert (id >= 0 and id < len(self.block.program.blocks))
return self.block.program.blocks[id]
def _blocks_attr(self, name):
"""
Get the blocks attribute by name.
Args:
name(str): the attribute name.
Returns:
list: list of the blocks attribute.
"""
attrs = []
for i in self._blocks_attr_ids(name):
assert (i >= 0 and i < len(self.block.program.blocks))
attrs.append(self.block.program.blocks[i])
return attrs
def _blocks_attr_ids(self, name):
"""
Get the blocks attribute's ids by name.
Args:
name(str): the attribute name.
Returns:
list: list of the blocks ids.
"""
return self.desc._blocks_attr_ids(name)
def all_attrs(self):
"""
Get the attribute dict.
Returns:
dict: The Operator's attribute dict, name->attr.
"""
attr_names = self.attr_names
attr_map = {}
for n in attr_names:
attr_type = self.desc.attr_type(n)
if attr_type == core.AttrType.BLOCK:
attr_map[n] = self._block_attr(n)
continue
if attr_type == core.AttrType.BLOCKS:
attr_map[n] = self._blocks_attr(n)
continue
attr_map[n] = self.attr(n)
return attr_map
def _is_optimize_op(self):
op_maker = core.op_proto_and_checker_maker
OPTIMIZE = core.op_proto_and_checker_maker.OpRole.Optimize
if not self.desc.has_attr(op_maker.kOpRoleAttrName()):
return False
op_role = self.desc.attr(op_maker.kOpRoleAttrName())
if op_role & int(OPTIMIZE):
return True
return False
def _is_backward_op(self):
op_maker = core.op_proto_and_checker_maker
BACKWARD = core.op_proto_and_checker_maker.OpRole.Backward
if not self.desc.has_attr(op_maker.kOpRoleAttrName()):
return False
op_role = self.desc.attr(op_maker.kOpRoleAttrName())
if op_role & int(BACKWARD):
return True
return False
class Block(object):
"""
In Fluid, a Program consists of multiple Blocks, and a Block stores
VarDescs and OpDescs. In a specific Block, a VarDesc has a unique name.
One block could have some child blocks, and a child block's name scope
should inherit the parent's so that an OpDesc in a child block can reference
a VarDesc that is stored in the parent block.
Please reference the framework.proto for details.
Args:
program(Program): The Program that the Block belongs to.
idx(int): The block's id in the Program.
Notes:
The constructor of Block should not be invoked directly. Please
use `Program._create_block()` to create a block.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
cur_block.append_op(type="abs",
inputs={"X": [var]},
outputs={"Out": [var]})
"""
def __init__(self, program, idx):
self.desc = program.desc.block(idx)
self.vars = collections.OrderedDict() # var_name --> var
self.ops = list() # operator list
self.program = program
self.removed_vars = collections.OrderedDict()
def __str__(self):
return self._to_readable_code()
def _to_readable_code(self, skip_op_callstack=True):
"""
Get readable debug string of Block.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Args:
skip_op_callstack(bool): whether to skip parsing Operator's attribute
op_callstack, default value is True
Returns:
string: The formatted Block string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cur_program = fluid.Program()
cur_block = cur_program.current_block()
new_var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
new_op = cur_block.append_op(type="abs",
inputs={"X": [new_var]},
outputs={"Out": [new_var]})
print(cur_block._to_readable_code())
"""
assert isinstance(
skip_op_callstack, bool
), "skip_op_callstack parameter's type must be bool, but received {}".format(
type(skip_op_callstack))
block_str = "{ // block "
block_str += "{}\n".format(self.idx)
for var in list(self.vars.values()):
block_str += " {}\n".format(var._to_readable_code())
block_str += "\n"
for op in self.ops:
block_str += " {}\n".format(
op._to_readable_code(skip_op_callstack))
block_str += "}"
return block_str
def to_string(self, throw_on_error, with_details=False):
"""
Get debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True.
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when
with_details is True. Default False.
Returns:
str: The debug string.
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
re_add_indent = re.compile(r"\n(.)")
res_str = "blocks {\n idx: %d\n parent_idx: %d" % (
self.idx, self.parent_idx)
for var in list(self.vars.values()):
res_str += "\n vars {\n %s }" % re_add_indent.sub(
r"\n \1", var.to_string(throw_on_error, with_details))
for op in self.ops:
res_str += "\n ops {\n %s }" % re_add_indent.sub(
r"\n \1", op.to_string(throw_on_error))
res_str += "\n}"
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.BlockDesc.FromString(
six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
__repr__ = __str__
@property
def parent_idx(self):
return self.desc.parent
@property
def forward_block_idx(self):
return self.desc.get_forward_block_idx()
def _set_forward_block_idx(self, idx):
"""
Set the forward block Idx.
Args:
idx(int): the block index.
Returns:
None
"""
self.desc._set_forward_block_idx(idx)
@property
def backward_block_idx(self):
cur_block_idx = self.idx
for block in self.program.blocks:
if block.forward_block_idx == cur_block_idx:
return block.idx
return -1
@property
def idx(self):
return self.desc.id
def var(self, name):
"""
Get a Variable by name from this block.
Args:
name(str): the Variable's name.
Raises:
ValueError: If the input's type is not str, or this block
doesn't have a Variable with the given name.
Returns:
Variable: the Variable with the given name.
"""
if not isinstance(name, six.string_types):
raise TypeError(
"var requires a string as its parameter, but got %s instead." %
(type(name)))
v = self.vars.get(name, None)
if v is None:
raise ValueError("var %s not in this block" % name)
return v
def _find_var_recursive(self, name):
"""
Get a Variable by name from this block recursively.
Args:
name(str): the Variable's name.
Returns:
Variable: the Variable with the given name, or None if not found.
"""
frontier = list()
visited = set()
frontier.append(self)
prog = self.program
while len(frontier) != 0: # BFS
cur = frontier[0]
frontier = frontier[1:]
if id(cur) in visited:
continue
if cur.has_var(name):
return cur.var(name)
if cur.parent_idx != -1:
frontier.append(prog.block(cur.parent_idx))
if cur.forward_block_idx != -1:
frontier.append(prog.block(cur.forward_block_idx))
visited.add(id(cur))
return None
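# A minimal sketch of the lookup order above (block names are illustrative):
# for a sub-block whose parent is the global block,
#   sub_block._find_var_recursive('w')
# first checks the sub-block itself, then walks to the parent block (and to
# the forward block, if any), and returns None when no block defines 'w'.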
def _var_recursive(self, name):
"""
Get a Variable by name from this block recursively.
Args:
name(str): the Variable's name.
Raises:
ValueError: neither this block nor its parent blocks
have a Variable with the given name.
Returns:
Variable: the Variable with the given name.
"""
var = self._find_var_recursive(name)
if var:
return var
else:
raise ValueError("Var {0} is not found recursively".format(name))
def all_parameters(self):
return list(self.iter_parameters())
def iter_parameters(self):
return (item[1] for item in six.iteritems(self.vars)
if isinstance(item[1], Parameter))
def create_var(self, *args, **kwargs):
if in_dygraph_mode():
var = _varbase_creator(*args, **kwargs)
else:
var = Variable(block=self, *args, **kwargs)
if 'initializer' in kwargs:
kwargs['initializer'](var, self)
return var
def has_var(self, name):
return name in self.vars
def _rename_var(self, name, new_name):
"""
Rename variable in vars and ops' inputs and outputs
Args:
name(str): the name to be renamed.
new_name(str): the new name to rename to.
Raises:
ValueError: If this block doesn't have a variable with the given name,
or the type of the var with the given name is neither Parameter
nor Variable.
Returns:
Variable: the Variable with the given name.
"""
name = cpt.to_text(name)
new_name = cpt.to_text(new_name)
if not self.has_var(name):
raise ValueError("var %s is not in current block" % name)
v = self.var(name)
if type(v) == Parameter:
var_type = "Parameter"
stop_gradient = v.stop_gradient
trainable = v.trainable
optimize_attr = v.optimize_attr
regularizer = v.regularizer
error_clip = v.error_clip
elif type(v) == Variable:
var_type = "Variable"
error_clip = v.error_clip
stop_gradient = v.stop_gradient
else:
raise ValueError("unsupported var type: %s", type(v))
orig_var_type = v.type
self.desc._rename_var(cpt.to_bytes(name), cpt.to_bytes(new_name))
# NOTE: v is destroyed by C++ after calling _rename_var.
d = self.desc.find_var(cpt.to_bytes(new_name))
if var_type == "Parameter":
if in_dygraph_mode():
var = ParamBase(
d.shape(),
d.dtype(),
type=orig_var_type,
name=new_name,
stop_gradient=stop_gradient,
trainable=trainable,
optimize_attr=optimize_attr,
regularizer=regularizer,
error_clip=error_clip)
else:
var = Parameter(
self,
d.shape(),
d.dtype(),
type=orig_var_type,
name=new_name,
stop_gradient=stop_gradient,
trainable=trainable,
optimize_attr=optimize_attr,
regularizer=regularizer,
error_clip=error_clip)
elif var_type == "Variable":
var = Variable(
self,
type=orig_var_type,
name=new_name,
error_clip=error_clip,
stop_gradient=stop_gradient)
# rename the python side, _sync_with_cpp will only add
# new vars/ops to python side.
self.vars[new_name] = var
del self.vars[name]
self._sync_with_cpp()
return var
def _remove_var(self, name, sync=True):
if sync:
self._sync_with_cpp()
self.desc._remove_var(cpt.to_bytes(name))
del self.vars[name]
def create_parameter(self, *args, **kwargs):
global_block = self.program.global_block()
param = None
if in_dygraph_mode():
param = ParamBase(*args, **kwargs)
else:
param = Parameter(global_block, *args, **kwargs)
# NOTE: Why only set stop_gradient=False in static mode
# Because in dygraph mode, the `stop_gradient` and `trainable`
# are related, and `trainable`'s default value is `True` or
# it is specified by users; there is no need to set
# `stop_gradient` for ParamBase here.
param.stop_gradient = False
if 'initializer' in kwargs:
def _is_inited_by(block, var):
init_ops = []
for op in block.ops:
if var.name in op.output_arg_names:
# In startup_program, "c_broadcast" and "c_sync_comm_stream"
# would otherwise be treated as initialization ops and cause an error.
# Treat "c_broadcast" and "c_sync_comm_stream" as a special case here.
if op.type in ["c_broadcast", "c_sync_comm_stream"]:
continue
init_ops.append(op)
return init_ops
initializer = kwargs['initializer']
init_ops = _is_inited_by(global_block, param)
init_ops_len = len(init_ops)
if init_ops_len > 1:
raise RuntimeError("param " + param.name +
" is inited by multiple init ops " + str(
init_ops))
elif init_ops_len == 1:
# TODO already inited, do nothing, should log a warning
pass
else:
initializer(param, self)
return param
def append_op(self, *args, **kwargs):
"""
Appends a new Operator according to the given arguments.
Returns:
Operator: the appended Operator.
"""
if in_dygraph_mode():
attrs = kwargs.get("attrs", {})
type = kwargs.get("type", None)
op = Operator(
block=self,
desc=None,
type=type,
inputs=None,
outputs=None,
attrs=attrs)
# record ops in tracer rather than blocks
#
# TODO(minqiyang): add op stop_gradient support in static mode too.
# currently, we only support stop_gradient in dygraph mode.
_dygraph_tracer().trace_op(type,
kwargs.get("inputs", {}),
kwargs.get("outputs", {}), attrs
if attrs else {},
kwargs.get("stop_gradient", False))
else:
op_desc = self.desc.append_op()
op = Operator(
block=self,
desc=op_desc,
type=kwargs.get("type", None),
inputs=kwargs.get("inputs", None),
outputs=kwargs.get("outputs", None),
attrs=kwargs.get("attrs", None))
self.ops.append(op)
return op
def _insert_op(self, index, *args, **kwargs):
"""
Insert an Operator according to the given arguments.
Args:
index(int): the index at which to insert the operator.
Returns:
Operator: the inserted Operator.
"""
self._sync_with_cpp()
op_desc = self.desc._insert_op(index)
op = Operator(block=self, desc=op_desc, *args, **kwargs)
self.ops.insert(index, op)
return op
def _insert_op_without_sync(self, index, *args, **kwargs):
"""
Insert an Operator according to the given arguments,
without sync_with_cpp, to make the compilation faster.
Args:
index(int): the index at which to insert the operator.
Returns:
Operator: the inserted Operator.
"""
op_desc = self.desc._insert_op(index)
op = Operator(block=self, desc=op_desc, *args, **kwargs)
self.ops.insert(index, op)
return op
def _remove_op(self, index, sync=True):
"""
Remove the operator at the specified position.
Args:
index(int): the position of the operator to remove.
Returns:
None
"""
if sync:
self._sync_with_cpp()
self.desc._remove_op(index, index + 1)
del self.ops[index]
def _slice_ops(self, start, end):
"""
Return the Operator between start and end.
Args:
start(int): the start position.
end(int): the end position.
Returns:
list: the Operators between start and end.
"""
return self.ops[start:end]
def _prepend_op(self, *args, **kwargs):
if in_dygraph_mode():
type = kwargs.get("type", None)
attrs = kwargs.get("attrs", {})
op = Operator(
self, None, type=type, inputs=None, outputs=None, attrs=attrs)
_dygraph_tracer().trace_op(type,
kwargs.get("inputs", {}),
kwargs.get("outputs", {}), attrs
if attrs else {},
kwargs.get("stop_gradient", False))
else:
op_desc = self.desc._prepend_op()
op = Operator(
self,
op_desc,
type=kwargs.get("type", None),
inputs=kwargs.get("inputs", None),
outputs=kwargs.get("outputs", None),
attrs=kwargs.get("attrs", None))
self.ops.insert(0, op)
return op
def _sync_with_cpp(self):
"""
Sync from the desc on the c++ end. This method is used to synchronize
the c++ desc instance generated by backward.
"""
# sync variables from cpp
for var in self.desc.all_vars():
if not self.has_var(var.name()):
self.create_var(name=var.name(), desc=var, type=var.type())
# sync variables removed from c++ end
for var in list(self.vars.keys()):
if not self.desc.find_var(cpt.to_bytes(var)):
self.vars.pop(var)
# sync operators from cpp
ops_in_cpp = []
for op_idx in range(0, self.desc.op_size()):
ops_in_cpp.append(self.desc.op(op_idx))
if len(self.ops) != 0:
first_op_in_python = self.ops[0].desc
last_op_in_python = self.ops[len(self.ops) - 1].desc
start_index = None
end_index = None
for index in range(len(ops_in_cpp)):
if first_op_in_python == ops_in_cpp[index]:
start_index = index
if last_op_in_python == ops_in_cpp[index]:
end_index = index
assert start_index is not None
assert end_index is not None
assert start_index <= end_index
else:
start_index = 0
end_index = -1
# sync ops append to the head of cpp_ops
for index in range((start_index - 1 - 1), -1, -1):
op_desc = ops_in_cpp[index]
op = Operator(self, op_desc)
self.ops.insert(0, op)
# sync ops append to the end of cpp_ops
for index in range((end_index + 1), len(ops_in_cpp)):
op_desc = ops_in_cpp[index]
op = Operator(self, op_desc)
self.ops.append(op)
# sync ops removed from c++ end
if end_index != -1 and end_index < len(self.ops):
ops_in_cpp_index = 0
ops_in_python_index = 0
while ops_in_python_index < len(
self.ops) and ops_in_cpp_index < len(ops_in_cpp):
if self.ops[ops_in_python_index].desc != ops_in_cpp[
ops_in_cpp_index]:
del self.ops[ops_in_python_index]
else:
ops_in_cpp_index += 1
ops_in_python_index += 1
assert len(self.ops) == len(ops_in_cpp)
for index in range(len(self.ops)):
assert self.ops[index].desc == ops_in_cpp[index]
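# Note: _sync_with_cpp is typically called after C++-side passes (for
# example, appending backward ops) have modified self.desc, so that the
# Python-side self.ops and self.vars mirror the C++ block again.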
def _copy_param_info_from(self, other):
"""
Copy the information of parameters from the other block.
Args:
other(Block): the other block.
Raises:
ValueError: If the type of the input is not Block, or `other` and this
block are not in the same topology.
Returns:
None
"""
if not isinstance(other, Block):
raise TypeError(
"_copy_param_info_from should be invoked with Block")
for p in other.iter_parameters():
assert isinstance(p, Parameter)
v = self.vars.get(p.name, None)
if v is None:
# if the Parameter is pruned, v may be None
continue
assert isinstance(v, Variable)
new_p = None
if in_dygraph_mode():
new_p = ParamBase(
shape=v.shape,
dtype=v.dtype,
type=v.type,
lod_level=v.lod_level,
stop_gradient=p.stop_gradient,
trainable=p.trainable,
optimize_attr=p.optimize_attr,
regularizer=p.regularizer,
error_clip=p.error_clip,
name=v.name)
else:
new_p = Parameter(
block=self,
shape=v.shape,
dtype=v.dtype,
type=v.type,
lod_level=v.lod_level
if v.type == core.VarDesc.VarType.LOD_TENSOR else None,
stop_gradient=p.stop_gradient,
trainable=p.trainable,
optimize_attr=p.optimize_attr,
regularizer=p.regularizer,
error_clip=p.error_clip,
name=v.name)
self.vars[new_p.name] = new_p
def _clone_variable(self, var, force_persistable=True):
"""
Clone a variable into current block.
Args:
var: the variable to be cloned.
force_persistable(bool): True means the result variable is set to persistable.
False means its persistable flag follows that of the input var.
default: True.
Returns:
Variable: the new variable cloned from 'var' in current block.
"""
assert isinstance(var, Variable)
ret_var = None
# make sure a STEP_SCOPES var can be safely cloned.
if var.type == core.VarDesc.VarType.STEP_SCOPES:
ret_var = self.create_var(
name=var.name, persistable=var.persistable, type=var.type)
elif var.type == core.VarDesc.VarType.RAW:
ret_var = self.create_var(
name=var.name, persistable=var.persistable, type=var.type)
elif var.type == core.VarDesc.VarType.SELECTED_ROWS:
ret_var = self.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
persistable=True if force_persistable else var.persistable,
is_data=var.is_data,
need_check_feed=var.desc.need_check_feed())
else:
ret_var = self.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=True if force_persistable else var.persistable,
is_data=var.is_data,
need_check_feed=var.desc.need_check_feed())
return ret_var
class IrNode(object):
"""
Python IrNode. Beneath it is a core.Node, which is used for Ir Pass.
"""
def __init__(self, node):
"""
Construct an IrNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
assert isinstance(node,
core.Node), 'node must be the instance of core.Node.'
self.node = node
def name(self):
"""
Return the node name.
Returns:
str: node name.
"""
return self.node.name()
def node_type(self):
"""
Return the node type.
Returns:
core.Node.Type: node type(core.Node.Type.Operation or core.Node.Type.Variable).
"""
return self.node.node_type()
def var(self):
"""
Return the node variable description.
Returns:
core.VarDesc: node variable description.
"""
return self.node.var()
def op(self):
"""
Return the node operator description.
Returns:
core.OpDesc: node operator description.
"""
return self.node.op()
def id(self):
"""
Return the node id.
Returns:
int: node id.
"""
return self.node.id()
def is_op(self):
"""
If the node is an operator, then return true.
Returns:
bool: indicate whether the node is an operator.
"""
return self.node.is_op()
def is_var(self):
"""
If the node is a variable, then return true.
Returns:
bool: indicate whether the node is a variable.
"""
return self.node.is_var()
def is_ctrl_var(self):
"""
If the node is a control dependence variable, then return true.
Returns:
bool: indicate whether the node is a control dependence variable.
"""
return self.node.is_ctrl_var()
def clear_inputs(self):
"""
Clear the node inputs. After executing the `clear_inputs` function,
the node inputs will be empty.
"""
self.node.clear_inputs()
def remove_input_by_id(self, node_id):
"""
Remove a node from inputs by the given node id.
Args:
node_id(int): the given node id.
"""
self.node.remove_input(node_id)
def remove_input(self, node):
"""
Remove a node from inputs.
Args:
node(IrNode): the node being removed.
"""
self.node.remove_input(node.node)
def append_input(self, node):
"""
Append a node in inputs.
Args:
node(IrNode): the node being appended.
"""
self.node.append_input(node.node)
def clear_outputs(self):
"""
Clear the node outputs. After executing the `clear_outputs` function,
the node outputs will be empty.
"""
self.node.clear_outputs()
def remove_output_by_id(self, node_id):
"""
Remove a node from outputs by the given node id.
Args:
node_id(int): the given node id.
"""
self.node.remove_output(node_id)
def remove_output(self, node):
"""
Remove a node from outputs.
Args:
node(IrNode): the node being removed.
"""
self.node.remove_output(node.node)
def append_output(self, node):
"""
Append a node in outputs.
Args:
node(IrNode): the node being appended.
"""
self.node.append_output(node.node)
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrNode): node inputs wrapped by IrNode.
"""
return [IrNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrNode): node outputs wrapped by IrNode.
"""
return [IrNode(n) for n in self.node.outputs]
class IrVarNode(IrNode):
"""
Python IrVarNode. Beneath it is a core.Node, it inherits from IrNode.
"""
def __init__(self, node):
"""
Construct an IrVarNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
assert isinstance(node, core.Node) and node.is_var(), \
'node must be the instance of core.Node and it must be a variable node.'
super(IrVarNode, self).__init__(node)
self.node = node
def set_shape(self, shape):
"""
Set the node variable shape.
Args:
shape(list): shape to be set.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
self.node.var().set_shape(shape)
def persistable(self):
"""
If the variable node is a persistable variable, then return true.
Returns:
bool: indicate whether the variable is persistable.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().persistable()
def type(self):
"""
Return the variable type.
Returns:
core.VarDesc.VarType: the variable type.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().type()
def dtype(self):
"""
Return the variable data type.
Returns:
core.VarDesc.VarType: the variable data type.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().dtype()
def shape(self):
"""
Return the variable shape.
Returns:
list: the variable shape.
"""
assert self.node.var() is not None, \
"The node variable description can not be None."
return self.node.var().shape()
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrOpNode): node inputs wrapped by IrOpNode.
"""
return [IrOpNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrOpNode): node outputs wrapped by IrOpNode.
"""
return [IrOpNode(n) for n in self.node.outputs]
class IrOpNode(IrNode):
"""
Python IrOpNode. Beneath it is a core.Node, it inherits from IrNode.
"""
def __init__(self, node):
"""
Construct an IrOpNode using core.Node.
Args:
node(core.Node): C++ Node.
"""
assert isinstance(node, core.Node) and node.is_op(), \
'node must be the instance of core.Node and it must be an operator node.'
super(IrOpNode, self).__init__(node)
self.node = node
def rename_input(self, old_input_name, new_input_name):
"""
Rename the input of this node.
Args:
old_input_name(str): the old input name.
new_input_name(str): the new input name.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
self.node.op()._rename_input(old_input_name, new_input_name)
def rename_output(self, old_output_name, new_output_name):
"""
Rename the output of this node.
Args:
old_output_name(str): the old output name.
new_output_name(str): the new output name.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
self.node.op()._rename_output(old_output_name, new_output_name)
def input(self, name):
"""
Get the argument name list by the parameter name for input.
Args:
name(str): the parameter name.
Returns:
list(str): the argument name list.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().input(name)
def output(self, name):
"""
Get the argument name list by the parameter name for output.
Args:
name(str): the parameter name.
Returns:
list(str): the argument name list.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().output(name)
def set_type(self, new_type):
"""
Change the operator type into new type.
Args:
new_type(str): new operator type to be set.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().set_type(new_type)
def set_attr(self, name, val):
"""
Set the value of attribute by attribute's name.
Args:
name(str): the attribute name.
val(bool|int|str|float|list): the value of the attribute.
"""
self._update_desc_attr(name, val)
def _update_desc_attr(self, name, val):
"""
Update the value of the op desc's attribute by attribute's name.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
desc = self.node.op()
if isinstance(val, Block):
desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and \
all(isinstance(v, Block) for v in val):
desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, (core.BlockDesc, core.ProgramDesc)):
desc.set_serialized_attr(name, val.serialize_to_string())
else:
desc._set_attr(name, val)
def input_arg_names(self):
"""
Return input arguments' names of this op node.
Returns:
list(str): input arguments' names of this op node.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().input_arg_names()
def output_arg_names(self):
"""
Return output arguments' names of this op node.
Returns:
list(str): output arguments' names of this op node.
"""
assert self.node.op() is not None, \
"The node operator description can not be None."
return self.node.op().output_arg_names()
@property
def inputs(self):
"""
Return the node inputs.
Returns:
list(IrVarNode): node inputs wrapped by IrVarNode.
"""
return [IrVarNode(n) for n in self.node.inputs]
@property
def outputs(self):
"""
Return the node outputs.
Returns:
list(IrVarNode): node outputs wrapped by IrVarNode.
"""
return [IrVarNode(n) for n in self.node.outputs]
class IrGraph(object):
"""
Python IrGraph. Beneath it is a core.Graph, which is used for
creating a C++ IR pass graph. An IrGraph is just a graph view of
a Program. In an IrGraph, both Variables and Operators are graph
nodes.
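Examples:
A minimal sketch of wrapping an existing static Program as an IrGraph
(the tiny fc network is only illustrative):
.. code-block:: python
import paddle
from paddle.fluid import core
from paddle.fluid.framework import IrGraph
paddle.enable_static()
x = paddle.static.data(name='x', shape=[None, 4], dtype='float32')
out = paddle.static.nn.fc(x=x, size=2)
graph = IrGraph(core.Graph(paddle.static.default_main_program().desc), for_test=False)
print('op num = {}'.format(len(graph.all_op_nodes())))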
"""
def __init__(self, graph, for_test=False):
"""
Construct an IrGraph using core.Graph.
Args:
graph(core.Graph): C++ Graph.
for_test(bool): True for the test graph and false for the train graph.
"""
assert isinstance(
graph, core.Graph), 'graph must be the instance of core.Graph.'
self.graph = graph
self._for_test = for_test
def clone(self):
"""
Create a new and duplicated IrGraph.
Warns:
The method only clones the graph structure, not its attributes.
Returns:
IrGraph: A new and duplicated graph.
"""
g = self.graph.clone()
return IrGraph(g, self._for_test)
def is_test(self):
"""
If the graph is used for testing, the function returns true. Otherwise, returns false.
"""
return self._for_test
def all_nodes(self):
"""
Return all nodes included in the graph as a set.
"""
return {IrNode(node) for node in self.graph.nodes()}
def all_var_nodes(self):
"""
Return all variable nodes included in the graph as a set.
"""
return {IrVarNode(node) for node in self.graph.nodes() if node.is_var()}
def all_persistable_nodes(self):
"""
Return all persistable variable nodes included in the graph as a set.
"""
persistable_nodes = set()
for node in self.graph.nodes():
if node.is_var() and node.var() is not None and node.var().persistable():
persistable_nodes.add(node)
return {IrVarNode(p) for p in persistable_nodes}
def all_op_nodes(self):
"""
Return all operator nodes included in the graph as a set.
"""
return {IrOpNode(node) for node in self.graph.nodes() if node.is_op()}
def create_persistable_node(self, name, var_type, shape, var_dtype):
"""
Create a persistable variable node in the graph. In IrGraph,
it can not distinguish between persistable variables and parameters.
Args:
name(str): the name of the persistable variable node.
var_type(core.VarDesc.VarType): the type of the persistable variable node.
shape(list): the shape of the persistable variable node.
var_dtype(core.VarDesc.VarType): the data type of the persistable variable node.
Returns:
IrVarNode: the created persistable variable node.
"""
var_desc = core.VarDesc(name)
var_desc.set_type(var_type)
var_desc.set_shape(shape)
var_desc.set_dtype(var_dtype)
var_desc.set_persistable(True)
return IrVarNode(self.graph.create_var_node(var_desc))
def create_var_node(self, name, var_type, shape, var_dtype):
"""
Create a variable node in the graph. The created variable node is
not persistable.
Args:
name(str): the name of the variable node.
var_type(core.VarDesc.VarType): the type of the variable node.
shape(list): the shape of the variable node.
var_dtype(core.VarDesc.VarType): the data type of the variable node.
Returns:
IrVarNode: the created variable node.
"""
var_desc = core.VarDesc(name)
var_desc.set_type(var_type)
var_desc.set_shape(shape)
var_desc.set_dtype(var_dtype)
return IrVarNode(self.graph.create_var_node(var_desc))
def create_control_dep_var(self):
"""
Create a control dependency variable node in the graph.
"""
return IrVarNode(self.graph.create_control_dep_var())
def create_var_node_from_desc(self, var_desc):
"""
Create a variable node by using an existing VarDesc in the graph.
Depending on the given VarDesc, the created variable node may be persistable.
Args:
var_desc(core.VarDesc): the given variable description.
Returns:
IrVarNode: the created variable node.
"""
return IrVarNode(self.graph.create_var_node(var_desc))
def create_op_node(self, op_type, attrs, inputs, outputs):
"""
Create an operator node in the graph.
Args:
op_type(str): the type of the operator node.
attrs(dict): the attributes of the operator node.
inputs(dict): the inputs of the operator node.
outputs(dict): the outputs of the operator node.
Returns:
IrOpNode: the created operator node.
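Examples:
A hedged sketch assuming ``graph`` is an IrGraph wrapping a static
Program (see the IrGraph class docstring); the variable names, the
``scale`` op and its attribute are illustrative:
.. code-block:: python
from paddle.fluid import core
# create two variable nodes and wire them to a scale op node
x = graph.create_var_node(name='x', var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], var_dtype=core.VarDesc.VarType.FP32)
y = graph.create_var_node(name='y', var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], var_dtype=core.VarDesc.VarType.FP32)
scale_op = graph.create_op_node(op_type='scale', attrs={'scale': 2.0}, inputs={'X': x}, outputs={'Out': y})
graph.link_to(x, scale_op)
graph.link_to(scale_op, y)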
"""
op_desc = core.OpDesc()
op_desc.set_type(op_type)
for attr, value in six.iteritems(attrs):
self._update_desc_attr(op_desc, attr, value)
for input_name, var_nodes in six.iteritems(inputs):
if not isinstance(var_nodes, list):
var_nodes = [var_nodes]
op_desc.set_input(input_name,
[var_node.name() for var_node in var_nodes])
for output_name, var_nodes in six.iteritems(outputs):
if not isinstance(var_nodes, list):
var_nodes = [var_nodes]
op_desc.set_output(output_name,
[var_node.name() for var_node in var_nodes])
return IrOpNode(self.graph.create_op_node(op_desc))
def create_op_node_from_desc(self, op_desc):
"""
Create an operator node by using an existing OpDesc in the graph.
Args:
op_desc(core.OpDesc): the given operator description.
Returns:
IrOpNode: the created operator node.
"""
return IrOpNode(self.graph.create_op_node(op_desc))
def update_input_link(self, old_input_node, new_input_node, op_node):
"""
Update the input link of an operator node.
Args:
old_input_node(IrNode): the old input node of the given op_node.
new_input_node(IrNode): the new input node of the given op_node.
op_node(IrOpNode): the operator node whose input link needs to be updated.
"""
assert old_input_node.node in self.graph.nodes() and new_input_node.node in \
self.graph.nodes() and op_node.node in self.graph.nodes(), \
'The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes.'
old_input_node.remove_output(op_node)
op_node.remove_input(old_input_node)
new_input_node.append_output(op_node)
op_node.append_input(new_input_node)
op_node.rename_input(old_input_node.name(), new_input_node.name())
def update_output_link(self, old_output_node, new_output_node, op_node):
"""
Update the output's link of an operator node.
Args:
old_output_node(IrNode): the old output node of the given op_node.
new_output_node(IrNode): the new output node of the given op_node.
op_node(IrOpNode): the operator node whose output link needs to be updated.
"""
assert old_output_node.node in self.graph.nodes() and new_output_node.node in \
self.graph.nodes() and op_node.node in self.graph.nodes(), \
'The three arguments(old_output_node &new_output_node &op_node) must be in the graph nodes.'
old_output_node.remove_input(op_node)
op_node.remove_output(old_output_node)
new_output_node.append_input(op_node)
op_node.append_output(new_output_node)
op_node.rename_output(old_output_node.name(), new_output_node.name())
def link_to(self, node_in, node_out):
"""
Connect two nodes.
Args:
node_in(IrNode): the input node.
node_out(IrNode): the output node.
"""
assert node_in.node in self.graph.nodes() and node_out.node in self.graph.nodes(), \
'The two arguments(node_in&node_out) must be in the graph nodes.'
node_in.append_output(node_out)
node_out.append_input(node_in)
def safe_remove_nodes(self, remove_nodes):
"""
Remove nodes safely since links connected to these removed nodes are
also removed.
Args:
remove_nodes(set): the nodes prepared to be removed.
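Examples:
A sketch assuming ``graph`` is an IrGraph wrapping a static Program;
it drops all control-dependency variable nodes, similar to what
``draw`` does internally:
.. code-block:: python
ctrl_vars = {n for n in graph.all_var_nodes() if n.is_ctrl_var()}
graph.safe_remove_nodes(ctrl_vars)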
"""
if not isinstance(remove_nodes, set):
if isinstance(remove_nodes, Iterable):
remove_nodes = set(remove_nodes)
else:
remove_nodes = {remove_nodes}
original_nodes = {n.node for n in remove_nodes}
core.graph_safe_remove_nodes(self.graph, original_nodes)
def resolve_hazard(self):
ordered_nodes = core.topology_sort(self.graph)
var_nodes = dict()
for node in ordered_nodes:
if node.is_op() and node.op() is not None:
for each_var_name in node.op().input_arg_names():
if each_var_name not in var_nodes:
var_nodes[each_var_name] = [
self._find_node_by_name(node.inputs, each_var_name)
]
for each_var_name in node.op().output_arg_names():
if each_var_name not in var_nodes:
var_nodes[each_var_name] = [
self._find_node_by_name(node.outputs, each_var_name)
]
else:
var_nodes[each_var_name].append(
self._find_node_by_name(node.outputs,
each_var_name))
self.graph.resolve_hazard(var_nodes)
def has_circle(self):
"""
Check whether the graph contains a cycle.
Returns:
bool: True if the graph contains a cycle, otherwise False.
"""
return core.has_circle(self.graph)
def graph_num(self):
"""
Count the number of disconnected sub-graphs (connected components) in this graph.
Returns:
int: the number of disconnected sub-graphs.
"""
return core.graph_num(self.graph)
def topology_sort(self):
"""
Perform the topology sort operation on the graph.
Notes: the `graph` must not contain a cycle.
Returns:
list(IrNode): nodes in topology order.
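Examples:
A sketch assuming ``graph`` is an acyclic IrGraph wrapping a static
Program (see the IrGraph class docstring):
.. code-block:: python
op_names = [n.name() for n in graph.topology_sort() if n.is_op()]
print(op_names)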
"""
ordered_nodes = core.topology_sort(self.graph)
return [IrNode(n) for n in ordered_nodes]
def build_adjacency_list(self):
"""
Build an adjacency list of operations for the `graph`.
Returns:
dict{IrNode: set(IrNode)}: the adjacency list.
"""
adj_list = core.build_adjacency_list(self.graph)
wrapped_adj_list = dict()
for k, v in six.iteritems(adj_list):
wrapped_adj_list[IrNode(k)] = {IrNode(n) for n in v}
return wrapped_adj_list
def draw(self, save_path, name, marked_nodes=None, remove_ctr_var=True):
"""
Draw the graph. If the `dot` command is installed, the drawn graph
is saved as a PDF file; otherwise only the dot file is produced.
Args:
save_path(str): the save path of drawn graph.
name(str): the name of drawn graph.
marked_nodes(set(IrNode)): nodes that are needed to be marked.
Default value is None.
remove_ctr_var(bool): If it is set True, all control variable nodes
in the graph will be removed. Default value is True.
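Examples:
A hedged sketch assuming ``graph`` is an IrGraph wrapping a static
Program; the save path and graph name are illustrative, and the `dot`
binary is required for PDF output:
.. code-block:: python
graph.draw(save_path='./ir_graph_viz', name='fc_graph', marked_nodes=graph.all_op_nodes())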
"""
def _convert_to_pdf(dot_file_path):
pdf_save_path = os.path.splitext(dot_file_path)[0] + '.pdf'
exited_code = subprocess.call(
'dot -Tpdf ' + dot_file_path + ' -o ' + pdf_save_path,
shell=True)
if exited_code != 0:
print('The dot command is needed for creating pdf files.')
print('The {} is saved as the dot filetype.'.format(
dot_file_path))
remove_ctr_vars = set()
if remove_ctr_var:
for node in self.all_var_nodes():
if node.is_ctrl_var():
remove_ctr_vars.add(node)
self.safe_remove_nodes(remove_ctr_vars)
print('Total ops num = {}.'.format(len(self.all_op_nodes())))
if marked_nodes is not None:
if not isinstance(marked_nodes, set):
if isinstance(marked_nodes, Iterable):
marked_nodes = set(marked_nodes)
else:
marked_nodes = {marked_nodes}
marked_nodes = {n.node for n in marked_nodes}
remove_ctr_vars = {n.node for n in remove_ctr_vars}
marked_nodes = marked_nodes - remove_ctr_vars
if self.graph.has('__graphviz__marked_node__'):
self.graph.erase('__graphviz__marked_node__')
self.graph.set('__graphviz__marked_node__', marked_nodes)
if not os.path.exists(save_path):
os.makedirs(save_path)
viz_dot_path = os.path.join(save_path, name) + '.dot'
viz_pass = core.get_pass('graph_viz_pass')
viz_pass.set('graph_viz_path', viz_dot_path)
viz_pass.apply(self.graph)
_convert_to_pdf(viz_dot_path)
def to_program(self):
"""
Convert the graph into a Program.
WARN: When the graph includes backward operator nodes, the
conversion process may fail. Usually, this function is
only used to convert a test graph.
Returns:
Program: a program converted from the graph.
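Examples:
A sketch assuming ``graph`` is a test IrGraph without backward
operator nodes (see the IrGraph class docstring):
.. code-block:: python
inference_program = graph.to_program()
print(inference_program.num_blocks)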
"""
convert_pass = core.get_pass('graph_to_program_pass')
desc = core.ProgramDesc()
convert_pass.set_not_owned('program', desc)
convert_pass.apply(self.graph)
program = Program._construct_from_desc(desc)
return program
def _find_node_by_name(self, nodes, node_name):
"""
Find a node in the given node set by name.
"""
target_node = None
for n in nodes:
if n.name() == node_name:
target_node = n
assert target_node is not None, "Cannot find the target node in the given set."
return target_node
def _update_desc_attr(self, desc, name, val):
"""
Update the value of desc's attribute by attribute's name.
"""
if isinstance(val, Block):
desc.set_block_attr(name, val.desc)
elif isinstance(val, list) and val and all(
isinstance(v, Block) for v in val):
desc.set_blocks_attr(name, [v.desc for v in val])
elif isinstance(val, (core.BlockDesc, core.ProgramDesc)):
desc.set_serialized_attr(name, val.serialize_to_string())
else:
desc._set_attr(name, val)
class Program(object):
"""
Create a Python Program. It has at least one :ref:`api_guide_Block_en`; when a
control flow op such as conditional_block or :ref:`api_paddle_fluid_layers_While` is included,
it will contain nested blocks.
Please refer to
`framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_
for details.
A set of Programs usually contains a startup program and a main program.
The startup program holds the initialization work, e.g. initializing the ``Parameter`` s, and the main
program holds the network structure and variables used for training.
A set of Programs can be used for training or testing: the train program
contains everything needed to build the training network, while the test
program prunes the content that is irrelevant to testing, e.g.
backward ops and their variables.
**Notes**:
**we have** :ref:`api_paddle_fluid_framework_default_startup_program` **and** :ref:`api_paddle_fluid_framework_default_main_program`
**by default, and the pair shares parameters. The** :ref:`api_paddle_fluid_framework_default_startup_program` **runs only once to initialize parameters, while**
:ref:`api_paddle_fluid_framework_default_main_program` **runs in every mini batch and adjusts the weights.**
Returns:
Program: An empty Program.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
main_program = static.Program()
startup_program = static.Program()
with static.program_guard(main_program=main_program, startup_program=startup_program):
x = static.data(name="x", shape=[-1, 784], dtype='float32')
y = static.data(name="y", shape=[-1, 1], dtype='int32')
z = static.nn.fc(name="fc", x=x, size=10, activation="relu")
print("main program is: {}".format(main_program))
print("start up program is: {}".format(startup_program))
"""
def __init__(self):
self.desc = core.ProgramDesc()
self.blocks = [Block(self, 0)]
self.current_block_idx = 0
global global_prog_seed
self._seed = global_prog_seed
self._current_role = core.op_proto_and_checker_maker.OpRole.Forward
self.__op_role_var = []
# for distribute training
# _is_distributed = True if under distributed training
self._is_distributed = False
# _is_chief = True if the trainer is the first one, usually No.0
self._is_chief = False
# _parameters_on_pservers records all the parameters distributed on parameter servers.
self._parameters_on_pservers = None
# _endpoints is a list about parameter servers ip:port, such as ["ip:port","ip:port"]
self._endpoints = []
# if current role is parameter server, the _ps_endpoint is its "ip:port"
self._ps_endpoint = None
# trainers_endpoints, it is used for distribution.
self._trainers_endpoints = []
# the distributed lookup table names
self._distributed_lookup_table = None
# use deep gradient compression or not
self._enable_dgc = False
self._use_lamb = False
self._nccl_comm_num = 1
self._use_hierarchical_allreduce = False
self._hierarchical_allreduce_inter_nranks = 0
# if this program has been optimized by distributed optimizer
# fleet_opt will be given a value
self._fleet_opt = None
self._program_config = None
# assigned if this program has been parsed by a pipeline optimizer
self._pipeline_opt = None
# appending gradients times
self._appending_grad_times = 0
# identifier for auto checkpoint
self._auto_checkpoint_name = unique_name.generate(
"__auto_checkpoint_program__")
# compiled program, i.e. Graph
self._graph = None
def global_seed(self, seed=0):
"""
Set global seed for Program
Returns:
None.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
prog = static.default_main_program()
print(prog.random_seed)
## 0
## the default random seed is 0
prog.global_seed(102)
prog1 = static.default_main_program()
print(prog1.random_seed)
## 102
## the random seed is 102
"""
global global_prog_seed
global_prog_seed = seed
self._seed = global_prog_seed
@property
def _op_role(self):
"""
The operator role. In an enum {Forward, Backward, Optimize}.
Notes: this is a low level API. It is used only for ParallelExecutor to
duplicate or schedule operator to devices.
For example, the forward operator should be executed on every device.
The backward operator should be executed on every device and the
parameter gradient of backward (use :code:`_op_role_var` to get this
variable) operator should be merged to one device. The optimization
operators should be executed on only one device and broadcast the
optimization result, i.e., the new parameter, to every other device.
"""
return self._current_role
@_op_role.setter
def _op_role(self, role):
self._current_role = role
@property
def _op_role_var(self):
"""
The auxiliary variables for :code:`_op_role` property.
See Also: :code:`Program._op_role`'s documentation for details.
Notes: This is a very low-level API. Users should not use it directly.
"""
return self.__op_role_var
@signature_safe_contextmanager
def _backward_role_guard(self):
tmp_role = self._current_role
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.Backward
try:
yield
finally:
self._current_role = tmp_role
@signature_safe_contextmanager
def _optimized_guard(self, param_and_grads):
"""
A with guard to set :code:`Optimization` :code:`OpRole` and
:code:`OpRoleVar` automatically.
Notes: This is a very low level API. Users should not use it directly.
Args:
param_and_grads(list): The variables (names) to be optimized.
Examples:
>>> import paddle.fluid as fluid
>>> p, g = backward(...)
>>> with program._optimized_guard([p,g]):
>>> p = p - 0.001 * g
"""
tmp_role = self._current_role
tmp_var = self.__op_role_var
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.Optimize
self.__op_role_var = [
var.name if isinstance(var, Variable) else var
for var in param_and_grads
]
try:
yield
finally:
self.__op_role_var = tmp_var
self._current_role = tmp_role
@signature_safe_contextmanager
def _lr_schedule_guard(self, is_with_opt=False):
"""
A with guard to set :code:`LRSched` :code:`OpRole` and
:code:`OpRoleVar` automatically. The :code:`OpRoleVar` is
set to the target learning rate.
Notes: This is a very low level API. Users should not use it directly.
Args:
is_with_opt: Only set to true if these ops are in the middle
of a bunch of optimize ops so that they can be treated
correctly. For example, sgd->lr_op->sgd->lr_op->sgd.
Examples:
>>> import paddle.fluid as fluid
>>> p, g = backward(...)
>>> with program.lr_schedule_guard():
>>> lr = lr * decay
"""
tmp_role = self._current_role
tmp_var = self.__op_role_var
OpRole = core.op_proto_and_checker_maker.OpRole
self._current_role = OpRole.LRSched
if is_with_opt:
self._current_role = int(OpRole.LRSched) | int(OpRole.Optimize)
# TODO(typhoonzero): how to set target learning rate var
self.__op_role_var = []
try:
yield
finally:
self.__op_role_var = tmp_var
self._current_role = tmp_role
def __str__(self):
"""
Get the protobuf debug string of this Program.
Returns:
(str): The protobuf debug string.
Raises:
ValueError: If any of required fields is not set.
"""
return self._to_readable_code()
def _to_readable_code(self, skip_op_callstack=True):
"""
Get readable debug string of Program.
.. note::
If you want to get the debug string in protobuf format,
please use :code:`to_string` method.
Args:
skip_op_callstack(bool): whether to skip parsing Operator's attribute
op_callstack, default value is True
Returns:
string: The formatted Program string.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
cur_program = static.Program()
cur_block = cur_program.current_block()
new_var = cur_block.create_var(name="X",
shape=[-1, 23, 48],
dtype='float32')
new_op = cur_block.append_op(type="abs",
inputs={"X": [new_var]},
outputs={"Out": [new_var]})
print(cur_program._to_readable_code())
"""
assert isinstance(
skip_op_callstack, bool
), "The type of skip_op_callstack parameter is wrong, expected bool, but received {}.".format(
type(skip_op_callstack))
program_str = ""
for block in self.blocks:
program_str += block._to_readable_code(skip_op_callstack)
program_str += '\n'
return program_str
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error (bool): raise Value error when any of required fields is not set.
with_details (bool): True if more details about variables and parameters, e.g., :code:`trainable`, :code:`optimize_attr`, need to print.
Returns:
str: The debug string describe current Program.
Raises:
ValueError: If any of required fields is not set and throw_on_error is True.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
prog = static.default_main_program()
x = static.data(name="X", shape=[2,3], dtype="float32")
pred = static.nn.fc(x, size=3)
prog_string = prog.to_string(throw_on_error=True, with_details=False)
prog_string_with_details = prog.to_string(throw_on_error=False, with_details=True)
print("program string without detail: {}".format(prog_string))
print("program string with detail: {}".format(prog_string_with_details))
"""
assert isinstance(
throw_on_error, bool
), "The type of throw_on_error parameter is wrong, expected bool, but received {}.".format(
type(throw_on_error))
assert isinstance(
with_details, bool
), "The type of with_details parameter is wrong, expected bool, but received {}.".format(
type(with_details))
if with_details:
res_str = ""
for block in self.blocks:
res_str += block.to_string(throw_on_error, with_details)
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.ProgramDesc.FromString(
six.binary_type(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
def _get_desc(self):
"""
Get the C++ side of `ProgramDesc` object pointer. The C++ object is
exposed by :code:`pybind`.
Notes: This is a very low level API. Users should not use this API
directly.
"""
return self.desc
def _version(self):
return self.desc._version()
def clone(self, for_test=False):
"""
.. note::
1. :code:`Program.clone()` method DOES NOT clone :ref:`api_paddle_io_DataLoader` .
2. Recommend you to use :code:`clone` before using :code:`Optimizer.minimize` .
3. This API has no effect in Dygraph Mode.
Create a new Program with only the forward content of the original one when ``for_test=True``.
Create a new Program identical to the original one when ``for_test=False``.
Some operators, e.g., :ref:`api_paddle_fluid_layers_batch_norm` , behave differently between
training and testing. They have an attribute, :code:`is_test`, to
control this behaviour. This method will change the :code:`is_test`
attribute of them to :code:`True` when :code:`for_test=True`.
* Set for_test to False when you want to clone the program for training.
* Set for_test to True when you want to clone the program for testing.
We will prune the backward and optimize part of the program when you
use :code:`clone` after :code:`Optimizer.minimize`, but we still
recommend you to use :code:`clone` before using :code:`Optimizer.minimize`.
For Example:
::
import paddle
import paddle.static as static
paddle.enable_static()
img = static.data(name='image', shape=[None, 784])
pred = static.nn.fc(x=img, size=10, activation='relu')
loss = paddle.mean(pred)
# Here we use clone before Momentum
test_program = static.default_main_program().clone(for_test=True)
optimizer = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
optimizer.minimize(loss)
Args:
for_test (bool): True to change the :code:`is_test` attribute of operators to :code:`True`
and prune the backward and optimize part of the program. The default value is :code:`False` .
Returns:
Program: A new Program with only the forward content of the original one when ``for_test=True``; a new Program identical to the original one when ``for_test=False``.
Examples:
.. note::
The Program's order may be different after :code:`clone` and
this will not affect your training or testing progress. In the following
example we give you a simple method :code:`print_prog(program)` to
print Program Descs in order to make sure you have the same print result
after :code:`clone`:
.. code-block:: python
import six
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
1. To clone a test program, the sample code is:
.. code-block:: python
import six
import paddle
import paddle.static as static
import paddle.utils as utils
import paddle.nn.functional as F
paddle.enable_static()
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
train_program = static.Program()
startup_program = static.Program()
# startup_program is used to do some parameter init work,
# and main program is used to hold the network
with static.program_guard(train_program, startup_program):
with utils.unique_name.guard():
img = static.data(name='image', shape=[None, 784])
hidden = static.nn.fc(x=img, size=200, activation='relu')
hidden = F.dropout(hidden, p=0.5)
loss = F.cross_entropy(
input=static.nn.fc(x=hidden, size=10, activation='softmax'),
label=static.data(name='label', shape=[1], dtype='int64'))
avg_loss = paddle.mean(loss)
test_program = train_program.clone(for_test=True)
print_prog(test_program)
# Due to parameter sharing between train and test, we need to use the startup
# program of train instead of the test startup program, which contains nothing.
# In Paddle, weights are shared by using the same Tensor name. In the train and
# test programs all parameters have the same names, which makes the two programs
# share parameters; that's why we need the startup program of train. The startup
# program of test has nothing in it, since it is a brand new program.
with static.program_guard(train_program, startup_program):
with utils.unique_name.guard():
sgd = paddle.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(avg_loss)
2. The clone method can be avoided if you create the program for training and the program for testing individually.
.. code-block:: python
import six
import paddle
import paddle.static as static
import paddle.utils as utils
import paddle.nn.functional as F
paddle.enable_static()
def print_prog(prog):
for name, value in sorted(six.iteritems(prog.block(0).vars)):
print(value)
for op in prog.block(0).ops:
print("op type is {}".format(op.type))
print("op inputs are {}".format(op.input_arg_names))
print("op outputs are {}".format(op.output_arg_names))
for key, value in sorted(six.iteritems(op.all_attrs())):
if key not in ['op_callstack', 'op_role_var']:
print(" [ attrs: {}: {} ]".format(key, value))
def network():
img = static.data(name='image', shape=[None, 784])
hidden = static.nn.fc(x=img, size=200, activation='relu')
hidden = F.dropout(hidden, p=0.5)
loss = F.cross_entropy(
input=static.nn.fc(x=hidden, size=10, activation='softmax'),
label=static.data(name='label', shape=[1], dtype='int64'))
avg_loss = paddle.mean(loss)
return avg_loss
train_program_2 = static.Program()
startup_program_2 = static.Program()
test_program_2 = static.Program()
with static.program_guard(train_program_2, startup_program_2):
with utils.unique_name.guard():
avg_loss = network()
sgd = paddle.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(avg_loss)
# the test startup program is not used.
with static.program_guard(test_program_2, startup_program_2):
with utils.unique_name.guard():
avg_loss = network()
print_prog(test_program_2)
The two code snippets above will generate and print the same programs.
"""
# NOTE(zhiqiu): we sync the original program first, since its program may diff with
# its desc due to modifying desc in c++ space. E.g. save op will add kLookupTablePath in desc.
self._sync_with_cpp()
pruned_origin_block_id_map = None
if for_test:
forward_prog = Program()
forward_prog.desc, pruned_origin_block_id_map = core.prune_backward(
self.desc)
forward_prog.blocks = [
Block(forward_prog, i)
for i in six.moves.range(forward_prog.desc.num_blocks())
]
forward_prog._sync_with_cpp()
p = forward_prog._inference_optimize(prune_read_op=False)
else:
p = Program()
p.current_block_idx = self.current_block_idx
p._seed = self._seed
p.desc = core.ProgramDesc(self.desc)
p.blocks = [
Block(p, i) for i in six.moves.range(self.desc.num_blocks())
]
p._current_role = self._current_role
p.__op_role_var = self.__op_role_var
p._appending_grad_times = self._appending_grad_times
if hasattr(self, 'lr_sheduler'):
p.lr_sheduler = self.lr_sheduler
# NOTE(zhiqiu): we sync the cloned program, to update its program by
# its desc.
p._sync_with_cpp()
p._copy_param_info_from(self)
p._copy_data_info_from(self, pruned_origin_block_id_map)
p._copy_dist_param_info_from(self)
return p
def _prune(self, targets):
"""
Prune operators and variables which are not needed to generate
:code:`targets`.
Notes: This is a very low level API. Users should not use this API
directly. This API is in flux and not stable.
Args:
targets(list|Variable|Operator): A list of variables, operators, or variable names
need to be pruned
Returns:
Program: A new, pruned program.
"""
return self._prune_with_input([], targets)
def _prune_with_input(self, feeded_var_names, targets):
"""
Prune operators and variables which are not needed to generate
:code:`targets`, as well as operators and variables that are only
needed to produce the fed variables (their values are provided by feeding).
Notes: This is a very low level API. Users should not use this API
directly. This API is in flux and not stable.
Args:
feeded_var_names(list|str): A list of variable names from where
pruning start. If it is set as [], this API works just like _prune()
targets(list|Variable|Operator): A list of variables, operators, or variable names
need to be pruned
Returns:
Program: A new, pruned program.
"""
# NOTE(zhiqiu): we sync the original program first, since its program may diff with
# its desc due to modifying desc in c++ space. E.g. save op will add kLookupTablePath in desc.
self._sync_with_cpp()
if not isinstance(feeded_var_names, list):
feeded_var_names = [feeded_var_names]
if not isinstance(targets, list):
targets = [targets]
for var in feeded_var_names:
if not isinstance(var, six.string_types):
raise ValueError(
"All feeded_var_names of Program._prune_with_input() can only be "
"str, but received %s." % type(var))
targets_idx = []
for t in targets:
if not isinstance(t, Operator):
if isinstance(t, Variable):
name = t.name
elif isinstance(t, six.string_types):
name = str(t)
else:
raise ValueError(
"All targets of Program._prune_with_input() can only be "
"Variable or Operator, but received %s." % type(t))
# NOTE(zhiqiu): For a variable to be fed in fetch_list, there are two cases:
# (1) the variable is a leaf, it has no op that generates it;
# (2) the variable is not a leaf, and we need to prune the op that generates it.
# In both cases, we can just skip the target_op for it.
if name in feeded_var_names:
continue
# After transpiler processing, the op that output this
# variable maybe has been changed, so t.op is not reliable
# and we need to find the current op that generate this
# variable here.
target_op = None
global_block = self.global_block()
for idx, op in enumerate(global_block.ops):
if name in op.output_arg_names:
# NOTE(zhiqiu): Find op that generate target name.
# Skip optimize op except for optimize op in targets,
# since optimize op generates parameters.
if op._is_optimize_op() and op not in targets:
continue
else:
target_op = op
break
if target_op is None:
raise ValueError(
"The target variable used for pruning should have an "
"associated operator that generates it.")
else:
targets_idx.append([target_op.block.idx, target_op.idx])
else:
targets_idx.append([t.block.idx, t.idx])
res = Program()
res.desc, pruned_origin_block_id_map = core.prune(self.desc,
set(feeded_var_names),
targets_idx)
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res._sync_with_cpp()
res._copy_param_info_from(self)
res._copy_data_info_from(self, pruned_origin_block_id_map)
res._copy_dist_param_info_from(self)
return res
def _inference_optimize(self, prune_read_op=True):
"""
This method will create a new program and do following adjustments on it:
1. Remove all reader variables and their creator ops if exist.
2. Remove the :code:`read_op` if exists.
3. change the :code:`is_test`
attribute of operators to :code:`True`. All the :code:`Parameter`
information will be lost.
Args:
prune_read_op(bool): remove the read ops that are added by py_reader
for cpp inference library
Notes: This API is a very low level API. Use
:code:`Program.clone(for_test=True)` instead.
Returns:
Program: The new program.
"""
res = Program()
res.desc = core.ProgramDesc(self.desc)
# remove all readers and the read_op if exist
read_op_idx = 0
root_block = res.desc.block(0)
if prune_read_op:
while True:
if read_op_idx >= root_block.op_size() or root_block.op(
read_op_idx).type() == 'read':
break
read_op_idx += 1
if read_op_idx < root_block.op_size():
root_block._remove_op(0, read_op_idx + 1)
for var in root_block.all_vars():
if var.type() == core.VarDesc.VarType.READER:
root_block._remove_var(cpt.to_bytes(var.name()))
# change all `is_test` attributes to True
for i in six.moves.range(res.desc.num_blocks()):
block = res.desc.block(i)
for j in six.moves.range(block.op_size()):
op = block.op(j)
if op.has_attr('is_test'):
op._set_attr('is_test', True)
res.blocks = [
Block(res, i) for i in six.moves.range(res.desc.num_blocks())
]
res._sync_with_cpp()
return res
@staticmethod
def parse_from_string(binary_str):
"""
.. note::
1. All information about parameters will be lost after serialization;
2. This API has no effect in Dygraph mode.
Deserialize a Program from `protobuf <https://en.wikipedia.org/wiki/Protocol_Buffers>`_ binary string.
This method is usually used together with model saving and loading.
Args:
binary_str (str): the binary protobuf string.
Returns:
Program: A deserialized Program.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
startup_prog = static.Program()
main_prog = static.Program()
with static.program_guard(main_prog, startup_prog):
x = static.data(name='X', shape=[1000, 784], dtype='float32')
y = static.data(name='Y', shape=[784, 100], dtype='float32')
z = paddle.matmul(x=x, y=y)
binary_str = static.default_main_program().desc.serialize_to_string()
prog_restored = static.default_main_program().parse_from_string(binary_str)
print(static.default_main_program())
print(prog_restored)
"""
p = Program()
p.desc = core.ProgramDesc(binary_str)
p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
p._sync_with_cpp()
return p
@staticmethod
def _construct_from_desc(desc):
"""
Construct a program from program desc.
Args:
desc(core.ProgramDesc): The program desc for constructing.
Returns:
Program: A program.
"""
p = Program()
p.desc = desc
p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
p._sync_with_cpp()
return p
@property
def random_seed(self):
"""
The default random seed for random operators in Program. ``0`` means get
the random seed from random device.
.. note::
It must be set before the operators have been added.
Returns:
int64: Random seed in current Program
Examples:
.. code-block:: python
import paddle
import paddle.static as static
import paddle.nn.functional as F
paddle.enable_static()
prog = static.default_main_program()
random_seed = prog.random_seed
x_var = static.data(name="X", shape=[3,3], dtype="float32")
print(random_seed)
## 0
## the default random seed is 0
# Here we need to set random seed before we use paddle.nn.functional.dropout
prog.random_seed = 1
z_var = F.dropout(x_var, 0.7)
print(prog.random_seed)
## 1
## the random seed is changed to 1
"""
return self._seed
@property
def num_blocks(self):
"""
The number of :ref:`api_guide_Block_en` in this Program.
.. note::
This API has no effect in Dygraph mode.
Returns:
int: the number of :ref:`api_guide_Block_en` in the current Program
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
prog = static.default_main_program()
num_blocks = prog.num_blocks
print(num_blocks)
# print result:
# 1
"""
return self.desc.num_blocks()
@random_seed.setter
def random_seed(self, seed):
if not isinstance(seed, int):
raise ValueError(
"Program.random_seed's input seed must be an integer, but received %s."
% type(seed))
self._seed = seed
def __repr__(self):
return self.__str__()
def global_block(self):
"""
.. note::
This API has no effect in Dygraph mode.
Get the first :ref:`api_guide_Block_en` of this Program.
Returns:
:ref:`api_guide_Block_en`: The first :ref:`api_guide_Block_en` of this Program.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
prog = static.default_main_program()
gb_block = prog.global_block()
print(gb_block)
"""
return self.blocks[0]
def block(self, index):
"""
.. note::
This API has no effect in Dygraph mode.
Get the :code:`index` :ref:`api_guide_Block_en` of this Program
Args:
index (int) - The index of :ref:`api_guide_Block_en` to get
Returns:
:ref:`api_guide_Block_en`: The :code:`index` block
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
prog = static.default_main_program()
block_0 = prog.block(0)
print(block_0)
"""
return self.blocks[index]
def current_block(self):
"""
.. note::
This API has no effect in Dygraph mode.
Get the current :ref:`api_guide_Block_en` . The :code:`current` :ref:`api_guide_Block_en`
is the :ref:`api_guide_Block_en` to append operators.
Returns:
:ref:`api_guide_Block_en`: The current :ref:`api_guide_Block_en` that operators are appended to.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
prog = static.default_main_program()
current_blk = prog.current_block()
print(current_blk)
"""
return self.blocks[self.current_block_idx]
def _create_block(self, parent_idx=None):
"""
Create a new block with the :code:`parent_idx` and change the current block
to new block.
Args:
parent_idx(int): The parent block index.
Returns:
Block: The new block.
"""
new_block_idx = len(self.blocks)
parent = self.current_block() if parent_idx is None else self.block(
parent_idx)
self.desc.append_block(parent.desc)
self.current_block_idx = new_block_idx
self.blocks.append(Block(self, self.current_block_idx))
return self.current_block()
def _rollback(self):
"""
Exit a code block, i.e., roll back to the parent block.
Returns:
None
"""
self.current_block_idx = self.current_block().parent_idx
def _sync_with_cpp(self):
"""
Synchronize Python instance to its binding C++ object instance.
If the program is modified in C++ space, this method should be invoked.
Notes: This is a very low level API. Users should not invoke it
directly.
Returns:
None
"""
for block_idx in range(len(self.blocks), self.desc.num_blocks()):
self.blocks.append(Block(self, block_idx))
for block in self.blocks:
block._sync_with_cpp()
def _copy_param_info_from(self, other):
"""
Copy the information of parameters from other program.
Notes: This is a very low level API. Users should not invoke it
directly.
Args:
other(Program): Other program
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError(
"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s"
% type(other))
self.global_block()._copy_param_info_from(other.global_block())
def _copy_dist_param_info_from(self, other):
"""
Copy the information of distributed information from other program.
Args:
other(Program): Other program
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError(
"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s"
% type(other))
self._is_distributed = other._is_distributed
self._is_chief = other._is_chief
self._parameters_on_pservers = other._parameters_on_pservers
self._endpoints = other._endpoints
self._ps_endpoint = other._ps_endpoint
self._distributed_lookup_table = other._distributed_lookup_table
def _copy_data_info_from(self, other, pruned_origin_block_id_map=None):
"""
Copy the information of data variables from other program.
Notes: This is a very low level API. Users should not invoke it
directly.
Args:
other(Program): Other program
pruned_origin_block_id_map(dict{int:int}): A dict which maps the block id in program
self to the block id in program other. For example, {0:0, 1:1, 2:3} means block 0 in self is
cloned from block 0 in other, etc. Default is None, which means default mapped,
{0:0, 1:1,..., n:n}.
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError(
"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s"
% type(other))
if not pruned_origin_block_id_map:
pruned_origin_block_id_map = {
i: i
for i in six.moves.range(self.desc.num_blocks())
}
# NOTE(zhiqiu): All vars in cloned program exist in original program.
# The reverse is not true, due to backward pruning.
for i, block in enumerate(self.blocks):
other_block = other.blocks[pruned_origin_block_id_map[i]]
for var in list(block.vars.values()):
other_var = other_block.var(var.name)
if other_var.is_data:
var.is_data = True
if other_var.desc.need_check_feed():
var.desc.set_need_check_feed(True)
if other_var.stop_gradient:
var.stop_gradient = True
def list_vars(self):
"""
Get all Tensors from this Program. An iterable object is returned.
Returns:
iterable Tensors: The generator will yield every Tensor in this program.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
prog = static.default_main_program()
img = static.data(name='img', shape=[None, 1,28,28], dtype='float32')
label = static.data(name='label', shape=[None,1], dtype='int64')
for var in prog.list_vars():
print(var)
# var img : paddle.VarType.LOD_TENSOR.shape(-1, 1, 28, 28).astype(VarType.FP32)
# var label : paddle.VarType.LOD_TENSOR.shape(-1, 1).astype(VarType.INT64)
"""
for each_block in self.blocks:
for each_var in list(each_block.vars.values()):
yield each_var
def all_parameters(self):
"""
Get all :ref:`api_guide_parameter_en` from this Program. A list object is returned.
Returns:
list[ :ref:`api_guide_parameter_en` ]: The list contains all parameters in this program.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
program = static.default_main_program()
data = static.data(name='x', shape=[None, 13], dtype='float32')
hidden = static.nn.fc(x=data, size=10)
loss = paddle.mean(hidden)
paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)
for param in program.all_parameters():
print(param)
# Here will print all parameters in current program, in this example,
# the result is like:
#
# persist trainable param fc_0.w_0 : paddle.VarType.LOD_TENSOR.shape(13, 10).astype(VarType.FP32)
# persist trainable param fc_0.b_0 : paddle.VarType.LOD_TENSOR.shape(10,).astype(VarType.FP32)
#
# Here print(param) will print out all the properties of a parameter,
# including name, type and persistable, you can access to specific
# property of a parameter, such as param.name, param.type
"""
parameters = []
for each_block in self.blocks:
parameters.extend(each_block.all_parameters())
return parameters
@six.add_metaclass(ParameterMetaClass)
class Parameter(Variable):
"""
Parameter is derived from Variable. A parameter is a persistable
Variable, and will be updated by optimizers after each iteration.
The training of a neural network is essentially the updating of
its parameters.
Relative to a general Variable, a Parameter has several member
variables of its own:
Args:
trainable(bool): True if the parameter needs to be updated after
iterations.
optimize_attr(map): Parameter attributes related with optimizing.
Currently, it only contains 'learning_rate'.
Default: {'learning_rate': 1.0}
regularizer(WeightDecayRegularizer): The Regularizer which will
be applied on the parameter. Default: None
do_model_average(bool): True if the model average strategy will
be applied on this parameter.
need_clip (bool): Whether the parameter gradient needs to be clipped
in the optimizer. Default is True.
"""
def __init__(self,
block,
shape,
dtype,
type=core.VarDesc.VarType.LOD_TENSOR,
**kwargs):
if shape is None:
raise ValueError("The shape of Parameter should not be None")
if dtype is None:
raise ValueError("The dtype of Parameter should not be None")
if len(shape) == 0:
raise ValueError(
"The dimensions of shape for Parameter must be greater than 0")
for each in shape:
if each < 0:
raise ValueError(
"Each dimension of shape for Parameter must be greater than 0, but received %s"
% list(shape))
Variable.__init__(
self,
block,
persistable=True,
shape=shape,
dtype=dtype,
type=type,
**kwargs)
self.trainable = kwargs.get('trainable', True)
self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
self.regularizer = kwargs.get('regularizer', None)
self.do_model_average = kwargs.get('do_model_average', None)
self.need_clip = kwargs.get('need_clip', True)
self.is_distributed = False
def __str__(self):
return self._to_readable_code()
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when with_details is True
Returns(str): The debug string.
Examples:
.. code-block:: python
import paddle.fluid as fluid
prog = fluid.default_main_program()
rlt = fluid.layers.data("fake_data", shape=[1,1], dtype='float32')
debug_str = prog.to_string(throw_on_error=True, with_details=False)
print(debug_str)
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
res_str = Variable.to_string(self, throw_on_error, True)
additional_attr = ("trainable", "optimize_attr", "regularizer",
"do_model_average", "need_clip")
for attr_name in additional_attr:
res_str += "%s: %s\n" % (attr_name,
cpt.to_text(getattr(self, attr_name)))
else:
res_str = Variable.to_string(self, throw_on_error, False)
return res_str
__repr__ = __str__
class ParamBase(core.VarBase):
"""
ParamBase is derived from Tensor (the Tensor concept in dygraph mode).
A ParamBase is a persistable Tensor, and will be updated by optimizers
after each iteration.
The training of a neural network is essentially the updating of
its ParamBase.
Relative to a general Tensor, a ParamBase has several member
variables of its own:
Args:
trainable(bool): True if the ParamBase needs to be updated after
iterations.
optimize_attr(map): ParamBase attributes related with optimizing.
Currently, it only contains 'learning_rate'.
Default: {'learning_rate': 1.0}
regularizer(WeightDecayRegularizer): The Regularizer which will
be applied on the ParamBase. Default: None
do_model_average(bool): True if the model average strategy will
be applied on this ParamBase.
need_clip (bool): Whether the parameter gradient needs to be clipped
in the optimizer. Default is True.
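Examples:
An illustrative dygraph sketch; in imperative mode the parameters of a
layer are instances of this class (or a subclass of it):
.. code-block:: python
import paddle
linear = paddle.nn.Linear(3, 3)
print(type(linear.weight))
print(linear.weight.trainable)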
"""
@dygraph_only
def __init__(self, shape, dtype, **kwargs):
if shape is None:
raise ValueError("The shape of Parameter should not be None")
if dtype is None:
raise ValueError("The dtype of Parameter should not be None")
if len(shape) == 0:
raise ValueError(
"The dimensions of shape for Parameter must be greater than 0")
for each in shape:
if each < 0:
raise ValueError(
"Each dimension of shape for Parameter must be greater than 0, but received %s"
% list(shape))
if dtype is not None:
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
name = kwargs.get('name', unique_name.generate('_param_base'))
super(ParamBase, self).__init__(dtype
if dtype else core.VarDesc.VarType.FP32,
list(shape) if shape else [], name,
core.VarDesc.VarType.LOD_TENSOR, True)
trainable = kwargs.get('trainable', True)
self.stop_gradient = not trainable
self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
self.regularizer = kwargs.get('regularizer', None)
self.do_model_average = kwargs.get('do_model_average', None)
self.need_clip = kwargs.get('need_clip', True)
self.is_distributed = False
# self.block = default_main_program().global_block()
@property
def trainable(self):
return not self.stop_gradient
@trainable.setter
def trainable(self, trainable):
if isinstance(trainable, bool):
self.stop_gradient = not trainable
else:
raise ValueError(
"The type of trainable MUST be bool, but the type is ",
type(trainable))
def __str__(self):
"""
Convert a ParamBase object to a readable string.
Returns(str): A readable string.
Examples:
.. code-block:: python
import paddle
linear = paddle.nn.Linear(3, 3)
print(linear.weight)
# Parameter containing:
# Tensor(shape=[3, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
# [[ 0.48948765, 0.05829060, -0.25524026],
# [-0.70368278, 0.52986908, -0.68742192],
# [-0.54217887, 0.48439729, 0.34082305]])
"""
return "Parameter containing:\n{tensor}".format(
tensor=super(ParamBase, self).__str__())
def __deepcopy__(self, memo):
"""
Deep copy the parameter; it always performs a Tensor copy.
Examples:
.. code-block:: python
import paddle
import copy
linear = paddle.nn.Linear(1, 3)
linear_copy = copy.deepcopy(linear)
print(linear.weight)
# Parameter containing:
# Tensor(shape=[1, 3], dtype=float32, place=CPUPlace, stop_gradient=False,
# [[-0.30929261, -0.90929240, -1.07851017]])
print(linear_copy.weight)
# Parameter containing:
# Tensor(shape=[1, 3], dtype=float32, place=CPUPlace, stop_gradient=False,
# [[-0.30929261, -0.90929240, -1.07851017]])
"""
state = copy.deepcopy(self.__dict__, memo)
state["name"] = self.name + unique_name.generate("_deepcopy")
new_param = ParamBase(self.shape, self.dtype, **state)
memo[id(self)] = new_param
new_param.copy_(self, True)
return new_param
__repr__ = __str__
# program is a global instance.
_main_program_ = Program()
_startup_program_ = Program()
def default_startup_program():
"""
Get the default/global startup program.
The :code:`paddle.nn` functions will append the initialization operators into the startup program.
The :code:`startup_program` will initialize the parameters by running these OPs.
This method will return the default or the current startup program. Users can use
:ref:`api_paddle_fluid_framework_program_guard` to switch :ref:`api_paddle_fluid_framework_Program` .
Returns:
Program: current default startup program.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name="x", shape=[-1, 784], dtype='float32')
out = paddle.static.nn.fc(name="fc", x=x, size=10, activation="relu")
print("main program is: {}".format(paddle.static.default_main_program()))
print("start up program is: {}".format(paddle.static.default_startup_program()))
"""
return _startup_program_
def default_main_program():
"""
This API can be used to get the ``default main program``, which stores the
descriptions of Ops and tensors.
For example ``z = paddle.add(x, y)`` will create a new ``add``
Op and a new ``z`` tensor, and they will be recorded in ``default main program`` .
The ``default main program`` is the default value for ``Program`` parameter in
a lot of APIs. For example, the :code:`Executor.run()` will execute the
:code:`default_main_program` when the program is not specified.
If you want to switch the ``default main program``, you can use :ref:`api_paddle_fluid_framework_program_guard` .
Returns:
Program: A ``Program`` which holding the descriptions of OPs and tensors in the network.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
# Sample Network:
x = paddle.static.data(name='x', shape=[100, 100], dtype='float32')
y = paddle.static.data(name='y', shape=[100, 100], dtype='float32')
out = paddle.add(x, y)
#print the number of blocks in the program, 1 in this case
print(paddle.static.default_main_program().num_blocks) # 1
#print the default_main_program
print(paddle.static.default_main_program())
"""
return _main_program_
def switch_main_program(program):
"""
Switch the main program to a new program.
Args:
program(Program): The new main program
Returns:
Program: The previous main program
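Examples:
A hedged sketch; :code:`program_guard` is usually preferred since it
restores the previous main program automatically:
.. code-block:: python
import paddle
from paddle.fluid.framework import switch_main_program
paddle.enable_static()
new_prog = paddle.static.Program()
prev_prog = switch_main_program(new_prog)
x = paddle.static.data(name='x', shape=[None, 4], dtype='float32')
switch_main_program(prev_prog)  # restore the previous main program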
"""
global _main_program_
prev_program = _main_program_
_main_program_ = program
return prev_program
def switch_startup_program(program):
"""
Switch the startup program to a new program
Args:
program(Program): The new startup program
Returns:
Program: The previous startup program
"""
global _startup_program_
prev_program = _startup_program_
_startup_program_ = program
return prev_program
@signature_safe_contextmanager
def program_guard(main_program, startup_program=None):
"""
:api_attr: Static Graph
Change the global main program and startup program with ``with`` statement.
Layer functions in the Python ``with`` block will append operators and
Tensors to the new main programs.
Args:
main_program(Program): New main program inside ``with`` statement.
startup_program(Program, optional): New startup program inside ``with``
statement. :code:`None` means not changing startup program,
default_startup_program is still used.
Default: None.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(main_program, startup_program):
data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
hidden = paddle.static.nn.fc(x=data, size=10, activation='relu')
Notes: A temporary :code:`Program` can be passed if the user does not need
to construct either the startup program or the main program.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
main_program = paddle.static.Program()
# does not care about startup program. Just pass a temporary value.
with paddle.static.program_guard(main_program, paddle.static.Program()):
data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
"""
from .data_feeder import check_type
check_type(main_program, 'main_program', Program,
'paddle.static.program_guard')
main_program = switch_main_program(main_program)
if startup_program is not None:
check_type(startup_program, 'startup_program', Program,
'paddle.static.program_guard')
startup_program = switch_startup_program(startup_program)
try:
yield
finally:
switch_main_program(main_program)
if startup_program is not None:
switch_startup_program(startup_program)
def _get_var(name, program=None):
"""
Get a variable by name from the global block of a program.
Args:
name(str): name of the variable
program(Program|None): program object.
If None, default_global_program() will be used.
Returns:
Variable
"""
if program is None:
program = default_main_program()
assert isinstance(name, str)
assert isinstance(program, Program)
return program.global_block().var(name)
@signature_safe_contextmanager
def _dygraph_guard(tracer):
global _dygraph_tracer_
tmp_tracer = _dygraph_tracer_
_dygraph_tracer_ = tracer
core._switch_tracer(tracer)
try:
yield
finally:
core._switch_tracer(tmp_tracer)
_dygraph_tracer_ = tmp_tracer
@signature_safe_contextmanager
def _dygraph_place_guard(place):
global _global_expected_place_
tmp_place = _global_expected_place_
_global_expected_place_ = place
_set_dygraph_tracer_expected_place(place)
try:
yield
finally:
_global_expected_place_ = tmp_place
_set_dygraph_tracer_expected_place(tmp_place)
def load_op_library(lib_filename):
"""
:api_attr: Static Graph
Load a dynamic library, including custom operators and kernels.
When library is loaded, ops and kernels registered in the library
will be available in PaddlePaddle main process.
Please note that custom operators cannot have the same type as
existing operators in the framework.
Args:
lib_filename (str): name of dynamic library.
Returns:
list[str]: new registered custom op names.
Examples:
.. code-block:: python
import paddle.fluid as fluid
#fluid.load_op_library('custom_op.so')
"""
core.load_op_library(lib_filename)
return OpProtoHolder.instance().update_op_proto()
def switch_device(device):
global _current_device
pre_device = _current_device
_current_device = device
return pre_device
@signature_safe_contextmanager
def device_guard(device=None):
"""
**Notes**:
**The API only supports static mode.**
A context manager that specifies the device on which the OP will be placed.
Args:
device(str|None): Specify the device to use in the context. It should be 'cpu' or 'gpu'.
When it is set to 'cpu' or 'gpu', all OPs created in the context will be
placed on CPUPlace or CUDAPlace. When 'gpu' is set and the program runs on
a single card, the device index will be the same as the device on which the
executor runs. Default: None, meaning OPs in this context will be automatically
assigned devices.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
support_gpu = paddle.is_compiled_with_cuda()
place = paddle.CPUPlace()
if support_gpu:
place = paddle.CUDAPlace(0)
# if GPU is supported, the three OPs below will be automatically assigned to CUDAPlace(0)
data1 = paddle.full(shape=[1, 3, 8, 8], fill_value=0.5, dtype='float32')
data2 = paddle.full(shape=[1, 3, 64], fill_value=0.5, dtype='float32')
shape = paddle.shape(data2)
with paddle.static.device_guard("cpu"):
# Ops created here will be placed on CPUPlace
shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])
with paddle.static.device_guard('gpu'):
# if GPU is supported, OPs created here will be placed on CUDAPlace(0), otherwise on CPUPlace
out = paddle.reshape(data1, shape=shape)
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
result = exe.run(fetch_list=[out])
"""
index = None
if device and ':' in device:
device, index = device.split(':')
if device == 'cpu':
raise ValueError("Should not set device id for cpu.")
if device not in ['cpu', 'gpu', '', None]:
raise ValueError(
"The Attr(device) should be 'cpu' or 'gpu', and it can also be empty string or None "
"when there is no need to specify device. But received %s" % device)
if index:
device = ":".join([device, index])
pre_device = switch_device(device)
try:
yield
finally:
switch_device(pre_device)
def set_flags(flags):
"""
This function sets the GFlags value in Paddle.
Args:
flags (dict): A dict contains flags and its value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0})
"""
if not isinstance(flags, dict):
raise TypeError('flags in set_flags should be a dict')
for key, value in flags.items():
if core.globals().is_public(key):
core.globals()[key] = value
else:
raise ValueError(
"Flag %s cannot set its value through this function." % (key))
def get_flags(flags):
"""
This function gets the GFlags value in Paddle.
Args:
flags(list|tuple|str): A list/tuple of string or a string which is the flag's name.
Returns:
flag's value in Paddle.
Examples:
.. code-block:: python
import paddle.fluid as fluid
flags = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']
res = fluid.get_flags(flags)
print(res)
# {'FLAGS_eager_delete_tensor_gb': 0.0, 'FLAGS_check_nan_inf': False}
"""
flags_value = {}
if isinstance(flags, (list, tuple)):
for key in flags:
if (core.globals().is_public(key)):
value = core.globals()[key]
temp = {key: value}
flags_value.update(temp)
else:
raise ValueError(
'Flag %s cannot get its value through this function.' %
(key))
elif isinstance(flags, str):
if (core.globals().is_public(flags)):
value = core.globals()[flags]
temp = {flags: value}
flags_value.update(temp)
else:
raise ValueError(
'Flag %s cannot get its value through this function.' % (flags))
else:
raise TypeError('Flags in get_flags should be a list, tuple or string.')
return flags_value
def _get_paddle_place(place):
"convert the string to paddle Place"
if place is None:
return place
if isinstance(place, (core.Place, core.XPUPlace, core.CPUPlace,
core.CUDAPinnedPlace, core.CUDAPlace)):
return place
if not isinstance(place, str):
raise ValueError(
"place only support string which is 'Place' and so on.")
place = place.lower()
if (place == "cpu"):
return core.CPUPlace()
if (place == "device"):
return core.Place()
avaliable_gpu_place = re.match(r'gpu:\d+', place)
if place == "gpu_pinned" or place == "gpu" or avaliable_gpu_place:
if not core.is_compiled_with_cuda():
raise ValueError(
"The device should not be {}, since PaddlePaddle is " \
"not compiled with CUDA".format(avaliable_gpu_place))
if place == "gpu_pinned":
return core.CUDAPinnedPlace()
elif place == "gpu":
return core.CUDAPlace(0)
else:
place_info_list = place.split(':', 1)
device_id = place_info_list[1]
device_id = int(device_id)
return core.CUDAPlace(device_id)
avaliable_xpu_place = re.match(r'xpu:\d+', place)
if avaliable_xpu_place:
if not core.is_compiled_with_xpu():
raise ValueError(
"The device should not be {}, since PaddlePaddle is " \
"not compiled with XPU".format(avaliable_xpu_place))
place_info_list = place.split(':', 1)
device_id = place_info_list[1]
device_id = int(device_id)
return core.XPUPlace(device_id)
raise ValueError(
"paddle support CPUPlace, CUDAPlace,CUDAPinnedPlace and XPUPlace, Please check your Place Input"
)
def _get_paddle_place_list(places):
if not isinstance(places, (list, tuple)):
raise TypeError("places must to be List or Tuple")
ret = []
for p in places:
p = _get_paddle_place(p)
ret.append(p)
return ret
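# Illustrative usage note (not part of the original module): _get_paddle_place() accepts
# strings such as "cpu", "gpu", "gpu:1", "gpu_pinned" or "xpu:0" (the GPU/XPU forms assume
# Paddle was compiled with the matching device support), and _get_paddle_place_list() maps a
# list of such strings, e.g.:
#   _get_paddle_place("gpu:1")                 # -> core.CUDAPlace(1)
#   _get_paddle_place_list(["cpu", "gpu:0"])   # -> [core.CPUPlace(), core.CUDAPlace(0)]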
|
the-stack_106_31567 | #!/usr/bin/env python
import re,sys,os,copy
from collections import defaultdict as ddict
class f90depinfo(object):
def __init__(self):
self.uses = ddict()
self.provides = ddict()
def getline( liter ):
line = ""
while len(line.strip()) < 1:
        line = next(liter).upper()  # use the built-in next() so this also works on Python 3
line = re.sub(r"^(.*?)!.*$",r"\1",line)
line = re.sub(r"ONLY.*$","",line)
line = line.strip()
if line[-1] == "&":
line += getline( liter )
line = re.sub(r"&","",line)
return line
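# Example (hypothetical Fortran input, for illustration): given the two source lines
#       use foo_mod, &
#           bar_mod
# getline() upper-cases them, joins the continuation and strips the "&", returning
# "USE FOO_MOD, BAR_MOD", so f90deps() below sees a single USE statement.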
def replace_ext(name,ext):
fileName, fileExtension = os.path.splitext(name)
return fileName + ext
def f90deps(srcs,directory=None):
files = []
exts = [ ".f90", ".F90", ".F", ".f", ".f77", ".f03", ".F03", ".f08", ".F08" ]
for src in srcs:
fileName, fileExtension = os.path.splitext(src)
if fileExtension in exts:
files.append(src)
srcs = files
use_line_re = re.compile(r"^\s*use\s+(\S.+)\s*$",re.IGNORECASE)
mod_line_re = re.compile(r"^\s*module\s+(\S+)\s*$",re.IGNORECASE)
cont_line_re = re.compile(r"^(.*)&\s*$")
split_re = re.compile(r"\s*,\s*")
dep_re = re.compile(r"(.*)")
mod_re = re.compile(r"(.*)")
info = ddict()
for src in srcs:
info[src] = f90depinfo()
if directory is not None and directory != "":
fh = open(directory + "/" + src,"r")
else:
fh = open(src,"r")
liter = iter(fh)
while True:
try:
line = getline(liter)
has_use = re.match( use_line_re, line )
has_mod = re.match( mod_line_re, line )
if has_use is not None:
for mod in has_use.group(1).split(","):
info[src].uses[ mod.strip() ] = None
elif has_mod is not None:
info[src].provides[ has_mod.group(1).strip() ] = None
except Exception as e:
# print "exception: ",e
break
modules = ddict()
for src in srcs:
for m in info[src].provides:
modules[m] = src
for src in srcs:
tmp = copy.deepcopy( info[src].uses )
for m in info[src].uses:
if not m in modules:
tmp.pop(m,None)
else:
tmp[m] = modules[m]
for m in info[src].provides:
if m in tmp:
tmp.pop(m,None)
info[src].uses = tmp
return info
def printf90deps(info,directory=None,extension="$(OBJEXT)",fileprefix=""):
result = ""
for src in info:
fname = src
oname = src
if directory is not None:
fname = ("/".join([directory,src])).replace(r"//",r"/").lstrip(r"/")
p,f = os.path.split(fname)
oname = p + "/" + fileprefix + f
# print src,directory,oname
#oname = ("/".join([directory,re.sub(r"/","_",fname)])).replace(r"//",r"/") # if AM_INIT_AUTOMAKE([subdir-objects])
#oname = fname
result += replace_ext(oname,"." + extension) + " : " + fname
for m in info[src].uses:
oname = info[src].uses[m]
if directory is not None:
oname = ("/".join([directory,oname])).replace(r"//",r"/").lstrip(r"/") # if AM_INIT_AUTOMAKE([subdir-objects])
p,f = os.path.split(oname)
oname = p + "/" + fileprefix + f
result += " " + replace_ext( oname, "." + extension )
result += "\n"
return result
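# Illustrative output (hypothetical file names): if solver.f90 contains "use utils" and the
# UTILS module is defined in utils.f90, printf90deps(f90deps([...]), None, "o") emits
# Makefile-style dependency rules such as:
#   solver.o : solver.f90 utils.o
#   utils.o : utils.f90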
if __name__ == "__main__":
print(printf90deps( f90deps(sys.argv[1:]), None, "o" ))
|
the-stack_106_31568 | """All constants related to the ZHA component."""
from __future__ import annotations
import enum
import logging
import bellows.zigbee.application
import voluptuous as vol
from zigpy.config import CONF_DEVICE_PATH # noqa: F401 # pylint: disable=unused-import
import zigpy_cc.zigbee.application
import zigpy_deconz.zigbee.application
import zigpy_xbee.zigbee.application
import zigpy_zigate.zigbee.application
import zigpy_znp.zigbee.application
from homeassistant.components.alarm_control_panel import DOMAIN as ALARM
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.number import DOMAIN as NUMBER
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
import homeassistant.helpers.config_validation as cv
from .typing import CALLABLE_T
ATTR_ARGS = "args"
ATTR_ATTRIBUTE = "attribute"
ATTR_ATTRIBUTE_ID = "attribute_id"
ATTR_ATTRIBUTE_NAME = "attribute_name"
ATTR_AVAILABLE = "available"
ATTR_CLUSTER_ID = "cluster_id"
ATTR_CLUSTER_TYPE = "cluster_type"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE_IEEE = "device_ieee"
ATTR_DEVICE_TYPE = "device_type"
ATTR_ENDPOINTS = "endpoints"
ATTR_ENDPOINT_NAMES = "endpoint_names"
ATTR_ENDPOINT_ID = "endpoint_id"
ATTR_IEEE = "ieee"
ATTR_IN_CLUSTERS = "in_clusters"
ATTR_LAST_SEEN = "last_seen"
ATTR_LEVEL = "level"
ATTR_LQI = "lqi"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MANUFACTURER_CODE = "manufacturer_code"
ATTR_MEMBERS = "members"
ATTR_MODEL = "model"
ATTR_NEIGHBORS = "neighbors"
ATTR_NODE_DESCRIPTOR = "node_descriptor"
ATTR_NWK = "nwk"
ATTR_OUT_CLUSTERS = "out_clusters"
ATTR_POWER_SOURCE = "power_source"
ATTR_PROFILE_ID = "profile_id"
ATTR_QUIRK_APPLIED = "quirk_applied"
ATTR_QUIRK_CLASS = "quirk_class"
ATTR_RSSI = "rssi"
ATTR_SIGNATURE = "signature"
ATTR_TYPE = "type"
ATTR_UNIQUE_ID = "unique_id"
ATTR_VALUE = "value"
ATTR_WARNING_DEVICE_DURATION = "duration"
ATTR_WARNING_DEVICE_MODE = "mode"
ATTR_WARNING_DEVICE_STROBE = "strobe"
ATTR_WARNING_DEVICE_STROBE_DUTY_CYCLE = "duty_cycle"
ATTR_WARNING_DEVICE_STROBE_INTENSITY = "intensity"
BAUD_RATES = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000]
BINDINGS = "bindings"
CHANNEL_ACCELEROMETER = "accelerometer"
CHANNEL_BINARY_INPUT = "binary_input"
CHANNEL_ANALOG_INPUT = "analog_input"
CHANNEL_ANALOG_OUTPUT = "analog_output"
CHANNEL_ATTRIBUTE = "attribute"
CHANNEL_BASIC = "basic"
CHANNEL_COLOR = "light_color"
CHANNEL_COVER = "window_covering"
CHANNEL_DOORLOCK = "door_lock"
CHANNEL_ELECTRICAL_MEASUREMENT = "electrical_measurement"
CHANNEL_EVENT_RELAY = "event_relay"
CHANNEL_FAN = "fan"
CHANNEL_HUMIDITY = "humidity"
CHANNEL_IAS_ACE = "ias_ace"
CHANNEL_IAS_WD = "ias_wd"
CHANNEL_IDENTIFY = "identify"
CHANNEL_ILLUMINANCE = "illuminance"
CHANNEL_LEVEL = ATTR_LEVEL
CHANNEL_MULTISTATE_INPUT = "multistate_input"
CHANNEL_OCCUPANCY = "occupancy"
CHANNEL_ON_OFF = "on_off"
CHANNEL_POWER_CONFIGURATION = "power"
CHANNEL_PRESSURE = "pressure"
CHANNEL_SHADE = "shade"
CHANNEL_SMARTENERGY_METERING = "smartenergy_metering"
CHANNEL_TEMPERATURE = "temperature"
CHANNEL_THERMOSTAT = "thermostat"
CHANNEL_ZDO = "zdo"
CHANNEL_ZONE = ZONE = "ias_zone"
CLUSTER_COMMAND_SERVER = "server"
CLUSTER_COMMANDS_CLIENT = "client_commands"
CLUSTER_COMMANDS_SERVER = "server_commands"
CLUSTER_TYPE_IN = "in"
CLUSTER_TYPE_OUT = "out"
PLATFORMS = (
ALARM,
BINARY_SENSOR,
CLIMATE,
COVER,
DEVICE_TRACKER,
FAN,
LIGHT,
LOCK,
NUMBER,
SENSOR,
SWITCH,
)
CONF_ALARM_MASTER_CODE = "alarm_master_code"
CONF_ALARM_FAILED_TRIES = "alarm_failed_tries"
CONF_ALARM_ARM_REQUIRES_CODE = "alarm_arm_requires_code"
CONF_BAUDRATE = "baudrate"
CONF_CUSTOM_QUIRKS_PATH = "custom_quirks_path"
CONF_DATABASE = "database_path"
CONF_DEFAULT_LIGHT_TRANSITION = "default_light_transition"
CONF_DEVICE_CONFIG = "device_config"
CONF_ENABLE_IDENTIFY_ON_JOIN = "enable_identify_on_join"
CONF_ENABLE_QUIRKS = "enable_quirks"
CONF_FLOWCONTROL = "flow_control"
CONF_RADIO_TYPE = "radio_type"
CONF_USB_PATH = "usb_path"
CONF_ZIGPY = "zigpy_config"
CONF_CONSIDER_UNAVAILABLE_MAINS = "consider_unavailable_mains"
CONF_DEFAULT_CONSIDER_UNAVAILABLE_MAINS = 60 * 60 * 2 # 2 hours
CONF_CONSIDER_UNAVAILABLE_BATTERY = "consider_unavailable_battery"
CONF_DEFAULT_CONSIDER_UNAVAILABLE_BATTERY = 60 * 60 * 6 # 6 hours
CONF_ZHA_OPTIONS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_DEFAULT_LIGHT_TRANSITION): cv.positive_int,
vol.Required(CONF_ENABLE_IDENTIFY_ON_JOIN, default=True): cv.boolean,
vol.Optional(
CONF_CONSIDER_UNAVAILABLE_MAINS,
default=CONF_DEFAULT_CONSIDER_UNAVAILABLE_MAINS,
): cv.positive_int,
vol.Optional(
CONF_CONSIDER_UNAVAILABLE_BATTERY,
default=CONF_DEFAULT_CONSIDER_UNAVAILABLE_BATTERY,
): cv.positive_int,
}
)
CONF_ZHA_ALARM_SCHEMA = vol.Schema(
{
vol.Required(CONF_ALARM_MASTER_CODE, default="1234"): cv.string,
vol.Required(CONF_ALARM_FAILED_TRIES, default=3): cv.positive_int,
vol.Required(CONF_ALARM_ARM_REQUIRES_CODE, default=False): cv.boolean,
}
)
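# Illustrative note (not part of the original module): validating an empty options dict with
# these voluptuous schemas fills in the defaults declared above, e.g.
#   CONF_ZHA_OPTIONS_SCHEMA({})  ->  {"enable_identify_on_join": True,
#                                     "consider_unavailable_mains": 7200,
#                                     "consider_unavailable_battery": 21600}
#   CONF_ZHA_ALARM_SCHEMA({})    ->  {"alarm_master_code": "1234",
#                                     "alarm_failed_tries": 3,
#                                     "alarm_arm_requires_code": False}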
CUSTOM_CONFIGURATION = "custom_configuration"
DATA_DEVICE_CONFIG = "zha_device_config"
DATA_ZHA = "zha"
DATA_ZHA_CONFIG = "config"
DATA_ZHA_BRIDGE_ID = "zha_bridge_id"
DATA_ZHA_CORE_EVENTS = "zha_core_events"
DATA_ZHA_DISPATCHERS = "zha_dispatchers"
DATA_ZHA_GATEWAY = "zha_gateway"
DATA_ZHA_PLATFORM_LOADED = "platform_loaded"
DATA_ZHA_SHUTDOWN_TASK = "zha_shutdown_task"
DEBUG_COMP_BELLOWS = "bellows"
DEBUG_COMP_ZHA = "homeassistant.components.zha"
DEBUG_COMP_ZIGPY = "zigpy"
DEBUG_COMP_ZIGPY_CC = "zigpy_cc"
DEBUG_COMP_ZIGPY_ZNP = "zigpy_znp"
DEBUG_COMP_ZIGPY_DECONZ = "zigpy_deconz"
DEBUG_COMP_ZIGPY_XBEE = "zigpy_xbee"
DEBUG_COMP_ZIGPY_ZIGATE = "zigpy_zigate"
DEBUG_LEVEL_CURRENT = "current"
DEBUG_LEVEL_ORIGINAL = "original"
DEBUG_LEVELS = {
DEBUG_COMP_BELLOWS: logging.DEBUG,
DEBUG_COMP_ZHA: logging.DEBUG,
DEBUG_COMP_ZIGPY: logging.DEBUG,
DEBUG_COMP_ZIGPY_CC: logging.DEBUG,
DEBUG_COMP_ZIGPY_ZNP: logging.DEBUG,
DEBUG_COMP_ZIGPY_DECONZ: logging.DEBUG,
DEBUG_COMP_ZIGPY_XBEE: logging.DEBUG,
DEBUG_COMP_ZIGPY_ZIGATE: logging.DEBUG,
}
DEBUG_RELAY_LOGGERS = [DEBUG_COMP_ZHA, DEBUG_COMP_ZIGPY]
DEFAULT_RADIO_TYPE = "ezsp"
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = "zigbee.db"
DEVICE_PAIRING_STATUS = "pairing_status"
DISCOVERY_KEY = "zha_discovery_info"
DOMAIN = "zha"
GROUP_ID = "group_id"
GROUP_IDS = "group_ids"
GROUP_NAME = "group_name"
MFG_CLUSTER_ID_START = 0xFC00
POWER_MAINS_POWERED = "Mains"
POWER_BATTERY_OR_UNKNOWN = "Battery or Unknown"
PRESET_SCHEDULE = "schedule"
PRESET_COMPLEX = "complex"
ZHA_ALARM_OPTIONS = "zha_alarm_options"
ZHA_OPTIONS = "zha_options"
ZHA_CONFIG_SCHEMAS = {
ZHA_OPTIONS: CONF_ZHA_OPTIONS_SCHEMA,
ZHA_ALARM_OPTIONS: CONF_ZHA_ALARM_SCHEMA,
}
class RadioType(enum.Enum):
"""Possible options for radio type."""
znp = (
"ZNP = Texas Instruments Z-Stack ZNP protocol: CC253x, CC26x2, CC13x2",
zigpy_znp.zigbee.application.ControllerApplication,
)
ezsp = (
"EZSP = Silicon Labs EmberZNet protocol: Elelabs, HUSBZB-1, Telegesis",
bellows.zigbee.application.ControllerApplication,
)
deconz = (
"deCONZ = dresden elektronik deCONZ protocol: ConBee I/II, RaspBee I/II",
zigpy_deconz.zigbee.application.ControllerApplication,
)
ti_cc = (
"Legacy TI_CC = Texas Instruments Z-Stack ZNP protocol: CC253x, CC26x2, CC13x2",
zigpy_cc.zigbee.application.ControllerApplication,
)
zigate = (
"ZiGate = ZiGate Zigbee radios: PiZiGate, ZiGate USB-TTL, ZiGate WiFi",
zigpy_zigate.zigbee.application.ControllerApplication,
)
xbee = (
"XBee = Digi XBee Zigbee radios: Digi XBee Series 2, 2C, 3",
zigpy_xbee.zigbee.application.ControllerApplication,
)
@classmethod
def list(cls) -> list[str]:
"""Return a list of descriptions."""
return [e.description for e in RadioType]
@classmethod
def get_by_description(cls, description: str) -> str:
"""Get radio by description."""
for radio in cls:
if radio.description == description:
return radio.name
raise ValueError
def __init__(self, description: str, controller_cls: CALLABLE_T) -> None:
"""Init instance."""
self._desc = description
self._ctrl_cls = controller_cls
@property
def controller(self) -> CALLABLE_T:
"""Return controller class."""
return self._ctrl_cls
@property
def description(self) -> str:
"""Return radio type description."""
return self._desc
REPORT_CONFIG_ATTR_PER_REQ = 3
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
REPORT_CONFIG_DEFAULT = (
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_ASAP = (
REPORT_CONFIG_MIN_INT_ASAP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_BATTERY_SAVE = (
REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_IMMEDIATE = (
REPORT_CONFIG_MIN_INT_IMMEDIATE,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_OP = (
REPORT_CONFIG_MIN_INT_OP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
SENSOR_ACCELERATION = "acceleration"
SENSOR_BATTERY = "battery"
SENSOR_ELECTRICAL_MEASUREMENT = CHANNEL_ELECTRICAL_MEASUREMENT
SENSOR_GENERIC = "generic"
SENSOR_HUMIDITY = CHANNEL_HUMIDITY
SENSOR_ILLUMINANCE = CHANNEL_ILLUMINANCE
SENSOR_METERING = "metering"
SENSOR_OCCUPANCY = CHANNEL_OCCUPANCY
SENSOR_OPENING = "opening"
SENSOR_PRESSURE = CHANNEL_PRESSURE
SENSOR_TEMPERATURE = CHANNEL_TEMPERATURE
SENSOR_TYPE = "sensor_type"
SIGNAL_ADD_ENTITIES = "zha_add_new_entities"
SIGNAL_ATTR_UPDATED = "attribute_updated"
SIGNAL_AVAILABLE = "available"
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_REMOVE = "remove"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
SIGNAL_UPDATE_DEVICE = "{}_zha_update_device"
SIGNAL_GROUP_ENTITY_REMOVED = "group_entity_removed"
SIGNAL_GROUP_MEMBERSHIP_CHANGE = "group_membership_change"
UNKNOWN = "unknown"
UNKNOWN_MANUFACTURER = "unk_manufacturer"
UNKNOWN_MODEL = "unk_model"
WARNING_DEVICE_MODE_STOP = 0
WARNING_DEVICE_MODE_BURGLAR = 1
WARNING_DEVICE_MODE_FIRE = 2
WARNING_DEVICE_MODE_EMERGENCY = 3
WARNING_DEVICE_MODE_POLICE_PANIC = 4
WARNING_DEVICE_MODE_FIRE_PANIC = 5
WARNING_DEVICE_MODE_EMERGENCY_PANIC = 6
WARNING_DEVICE_STROBE_NO = 0
WARNING_DEVICE_STROBE_YES = 1
WARNING_DEVICE_SOUND_LOW = 0
WARNING_DEVICE_SOUND_MEDIUM = 1
WARNING_DEVICE_SOUND_HIGH = 2
WARNING_DEVICE_SOUND_VERY_HIGH = 3
WARNING_DEVICE_STROBE_LOW = 0x00
WARNING_DEVICE_STROBE_MEDIUM = 0x01
WARNING_DEVICE_STROBE_HIGH = 0x02
WARNING_DEVICE_STROBE_VERY_HIGH = 0x03
WARNING_DEVICE_SQUAWK_MODE_ARMED = 0
WARNING_DEVICE_SQUAWK_MODE_DISARMED = 1
ZHA_DISCOVERY_NEW = "zha_discovery_new_{}"
ZHA_CHANNEL_MSG = "zha_channel_message"
ZHA_CHANNEL_MSG_BIND = "zha_channel_bind"
ZHA_CHANNEL_MSG_CFG_RPT = "zha_channel_configure_reporting"
ZHA_CHANNEL_MSG_DATA = "zha_channel_msg_data"
ZHA_CHANNEL_CFG_DONE = "zha_channel_cfg_done"
ZHA_GW_MSG = "zha_gateway_message"
ZHA_GW_MSG_DEVICE_FULL_INIT = "device_fully_initialized"
ZHA_GW_MSG_DEVICE_INFO = "device_info"
ZHA_GW_MSG_DEVICE_JOINED = "device_joined"
ZHA_GW_MSG_DEVICE_REMOVED = "device_removed"
ZHA_GW_MSG_GROUP_ADDED = "group_added"
ZHA_GW_MSG_GROUP_INFO = "group_info"
ZHA_GW_MSG_GROUP_MEMBER_ADDED = "group_member_added"
ZHA_GW_MSG_GROUP_MEMBER_REMOVED = "group_member_removed"
ZHA_GW_MSG_GROUP_REMOVED = "group_removed"
ZHA_GW_MSG_LOG_ENTRY = "log_entry"
ZHA_GW_MSG_LOG_OUTPUT = "log_output"
ZHA_GW_MSG_RAW_INIT = "raw_device_initialized"
EFFECT_BLINK = 0x00
EFFECT_BREATHE = 0x01
EFFECT_OKAY = 0x02
EFFECT_DEFAULT_VARIANT = 0x00
|
the-stack_106_31569 | import rospy
import sys
import json
from interactivespaces_msgs.msg import GenericMessage
"""
This script may be used to send a director message.
You just need to supply a json file with the director message
in it.
"""
if len(sys.argv) <= 1:
print("Sorry - you need to supply path to json file for emission")
print("e.g. ./script.py <path_to_json>")
sys.exit(1)
try:
    json_file = open(sys.argv[1], 'r')
except IOError:
    print("Could not open file")
    sys.exit(1)
try:
    message = json_file.read()
except IOError:
    print("Could not read file")
    sys.exit(1)
try:
    message = json.loads(message)
    DIRECTOR_MESSAGE = json.dumps(message)
except ValueError:
    print("Could not parse json file")
    sys.exit(1)
msg = GenericMessage()
msg.type = 'json'
msg.message = DIRECTOR_MESSAGE
try:
rospy.init_node('director_messager')
rospy.sleep(1)
rospy.Publisher('/director/scene', GenericMessage, queue_size=10, latch=True).publish(msg)
rospy.sleep(1)
except KeyboardInterrupt:
print("Exiting cleanly")
rospy.signal_shutdown("Ctrl+c used on send_director.py")
|
the-stack_106_31571 | # -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import login_required, login_user, logout_user
from tour.extensions import api, login_manager
from tour.public.forms import LoginForm
from tour.public.points import PointsAPI, PointsListAPI
from tour.user.forms import RegisterForm
from tour.user.models import User
from tour.utils import flash_errors
blueprint = Blueprint('public', __name__, static_folder='../static')
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id))
api.add_resource(PointsListAPI, '/tour/api/v1.0/points', endpoint='points')
api.add_resource(PointsAPI, '/tour/api/v1.0/points/<int:id>', endpoint='point')
@blueprint.route('/', methods=['GET', 'POST'])
def home():
"""Home page."""
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash('You are logged in.', 'success')
redirect_url = request.args.get('next') or url_for('user.members')
return redirect(redirect_url)
else:
flash_errors(form)
return render_template('public/home.html', form=form)
@blueprint.route('/logout/')
@login_required
def logout():
"""Logout."""
logout_user()
flash('You are logged out.', 'info')
return redirect(url_for('public.home'))
@blueprint.route('/register/', methods=['GET', 'POST'])
def register():
"""Register new user."""
form = RegisterForm(request.form)
if form.validate_on_submit():
User.create(username=form.username.data, email=form.email.data, password=form.password.data, active=True)
flash('Thank you for registering. You can now log in.', 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('public/register.html', form=form)
@blueprint.route('/about/')
def about():
"""About page."""
form = LoginForm(request.form)
return render_template('public/about.html', form=form)
|
the-stack_106_31573 | '''
Demonstrates how to use the `LSTM` model from the
`speechemotionrecognition` package
'''
from keras.utils import np_utils
import pulsectl
import serial
import time
import os
import sys
import collections
import webrtcvad
import signal
import subprocess
import socket as sk
import numpy as np
from common import extract_data
from dnn_test import CNN, LSTM
from utilities_test import get_feature_vector_from_mfcc, get_stream_feature_vector_from_mfcc
from pywebrtcvad.vadfunc import make_chunk, make_stream_chunk, write_wave, frame_generator, stream_vad_collector
pulse = pulsectl.Pulse('my-client-name')
def give_me_the_device_num():
source_list = pulse.source_list()
for row in source_list:
if str(row).find('bluez') != -1:
chunk = str(row).split(' ')
for cc in chunk:
idx = cc.find('index=')
if idx != -1:
return cc[6:-1]
return -1
def recording_blue(rate, device_num):
cmd = "parec -r --rate=" + str(rate) + " --device=" + str(device_num) + " --channels=1"
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
shell=True, preexec_fn=os.setsid)
return process
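# Example (illustrative values): recording_blue(16000, 7) spawns roughly
#   parec -r --rate=16000 --device=7 --channels=1
# and returns the Popen handle whose stdout the script reads as raw 16-bit mono PCM.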
def lstm_example():
# initializing dont touch
timeout = False
trigger_num = 0
ser_to_ino = serial.Serial('/dev/ttyUSB0', 9600)
file_idx = 0
rate = 16000
duration_sec = 4
duration_byte = rate * 2 * duration_sec
to_flatten = False
in_shape = np.zeros((198,39))
model = LSTM(input_shape=in_shape.shape, num_classes=7)
load_path = 'korean_LSTM_best_model.h5'
model.load_model(load_path)
model.trained = True
print('start')
device_num = give_me_the_device_num()
print("device_num: ", device_num)
process = recording_blue(rate, device_num)
pcm_list = []
aggressive = 3
triggered = False
padding_duration_ms = 300
frame_duration_ms = 30
n = int(rate * (frame_duration_ms / 1000.0) * 2)
num_padding_frames = int(padding_duration_ms / frame_duration_ms)
ring_buffer = collections.deque(maxlen=num_padding_frames)
vad = webrtcvad.Vad(aggressive)
voiced_frames = []
sibal_idx = 0
while(1):
pcm_data_line = process.stdout.readline()
# pcm_data += pcm_data_line
pcm_list.append(pcm_data_line)
# target_num = len(pcm_data) // n # number of audio data for 30 milli seconds
target_num = len(pcm_list)
if target_num <= 300:
continue
pcm_data = b''.join(pcm_list)
sibal_idx += 1
target_num = len(pcm_data) // n
pcm_data_to_chunk = pcm_data[:n * target_num]
pcm_list = [pcm_data[n * target_num:]]
# pcm_data = pcm_data[n * target_num:]
frames = list(frame_generator(frame_duration_ms, pcm_data_to_chunk, rate))
for frame in frames:
triggered, voiced_audio, timeout = stream_vad_collector(rate, vad, frame, triggered, ring_buffer, voiced_frames, timeout)
if triggered and not timeout:
trigger_num += 1
if 150 <= trigger_num: # 150 means 4.5 seconds
timeout = True
if voiced_audio is not None: # talking -> no talking then this if works.
trigger_num = 0
voiced_frames = []
emotion = model.predict_one(get_stream_feature_vector_from_mfcc(voiced_audio, fs=rate, flatten=to_flatten))
print(emotion)
ser_to_ino.write(str(emotion).encode('utf-8'))
file_idx += 1
if __name__ == "__main__":
lstm_example()
|
the-stack_106_31576 | from matplotlib import pyplot as plt
x = []
y = []
for i in range(100):
x.append(i)
y.append(i)
# Set the x and y limits to define their range
plt.xlim(0, 100)
plt.ylim(0, 100)
# Plotting the graph
plt.plot(x, y, color = 'green')
plt.pause(0.01)
plt.show()
|
the-stack_106_31578 |
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.preprocessing import LabelEncoder
from pyriemann.classification import MDM
from pyriemann.estimation import ERPCovariances
from braininvaders2012.dataset import BrainInvaders2012
from tqdm import tqdm
import numpy as np
import mne
import joblib
"""
=============================
Classification of the trials
=============================
This example shows how to extract the epochs from the dataset of a given
subject and then classify them with machine learning techniques based on
Riemannian geometry.
"""
# Authors: Pedro Rodrigues <[email protected]>
#
# License: BSD (3-clause)
import warnings
warnings.filterwarnings("ignore")
# define the dataset instance
dataset = BrainInvaders2012(Training=True)
scr = {}
# get the data from subject of interest
for subject in dataset.subject_list:
data = dataset._get_single_subject_data(subject)
raw = data['session_1']['run_training']
# filter data and resample
fmin = 1
fmax = 24
raw.filter(fmin, fmax, verbose=False)
# detect the events and cut the signal into epochs
events = mne.find_events(raw=raw, shortest_event=1, verbose=False)
event_id = {'NonTarget': 1, 'Target': 2}
epochs = mne.Epochs(raw, events, event_id, tmin=0.0, tmax=1.0, baseline=None, verbose=False, preload=True)
epochs.pick_types(eeg=True)
# get trials and labels
X = epochs.get_data()
y = events[:, -1]
y = LabelEncoder().fit_transform(y)
# cross validation
skf = StratifiedKFold(n_splits=5)
clf = make_pipeline(ERPCovariances(estimator='lwf', classes=[1]), MDM())
scr[subject] = cross_val_score(clf, X, y, cv=skf, scoring='roc_auc').mean()
# print results of classification
print('subject', subject)
print('mean AUC :', scr[subject])
#####
filename = './classification_scores.pkl'
joblib.dump(scr, filename)
with open('classification_scores.txt', 'w') as the_file:
for subject in scr.keys():
the_file.write('subject ' + str(subject).zfill(2) + ' :' + ' {:.2f}'.format(scr[subject]) + '\n')
|
the-stack_106_31579 | """
Utility functions used across scripts.
"""
__author__ = "Shyue Ping Ong, Dan Gunter"
__copyright__ = "Copyright 2012-2014, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Dan Gunter"
__email__ = "[email protected]"
__date__ = "2012-12-01"
## Imports
import bson
import datetime
import json
import logging
from pymongo.mongo_client import MongoClient
from pymatgen.db.dbconfig import DBConfig
# Backwards compatibility from refactor to `dbconfig` module
# Copy of functions that were moved
from pymatgen.db.dbconfig import normalize_auth
# Copy of global constants that were moved
DEFAULT_PORT = DBConfig.DEFAULT_PORT
DEFAULT_CONFIG_FILE = DBConfig.DEFAULT_FILE
DEFAULT_SETTINGS = DBConfig.DEFAULT_SETTINGS
## Logging
_log = logging.getLogger("mg.util")
## Classes
class MongoJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, bson.objectid.ObjectId):
return str(o)
if isinstance(o, datetime.datetime):
return o.isoformat()
return json.JSONEncoder.default(self, o)
## Functions
def get_settings(config_file):
cfg = DBConfig(config_file)
return cfg.settings
def get_database(config_file=None, settings=None, admin=False, **kwargs):
d = get_settings(config_file) if settings is None else settings
conn = MongoClient(host=d["host"], port=d["port"], **kwargs)
db = conn[d["database"]]
try:
user = d["admin_user"] if admin else d["readonly_user"]
passwd = d["admin_password"] if admin else d["readonly_password"]
db.authenticate(user, passwd)
except (KeyError, TypeError, ValueError):
        _log.warning(
            "No {admin,readonly}_user/password found in config file, "
            "accessing DB without authentication"
        )
return db
def get_collection(config_file, admin=False, settings=None):
if settings is None:
settings = get_settings(config_file)
db = get_database(admin=admin, settings=settings)
return db[settings["collection"]]
def collection_keys(coll, sep="."):
"""Get a list of all (including nested) keys in a collection.
Examines the first document in the collection.
:param sep: Separator for nested keys
:return: List of str
"""
def _keys(x, pre=""):
for k in x:
yield (pre + k)
if isinstance(x[k], dict):
for nested in _keys(x[k], pre + k + sep):
yield nested
return list(_keys(coll.find_one()))
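# Example (illustrative document, not from a real collection): for a first document of
# {"task_id": "mp-1", "spec": {"xc": "PBE"}} and sep=".", collection_keys() returns
# ["task_id", "spec", "spec.xc"].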
def csv_list(l):
"""Format list to a string with comma-separated values."""
if len(l) == 0:
return ""
return ", ".join(map(str, l))
def quotable(v):
if isinstance(v, int) or isinstance(v, float):
return str(v)
return "'{}'".format(v)
def csv_dict(d):
"""Format dict to a string with comma-separated values."""
if len(d) == 0:
return "{}"
return (
"{" + ", ".join(["'{}': {}".format(k, quotable(v)) for k, v in d.items()]) + "}"
)
def kvp_dict(d):
"""Format dict to key=value pairs."""
return ", ".join(["{}={}".format(k, quotable(v)) for k, v in d.items()])
|
the-stack_106_31581 | import caffe2onnx.src.c2oObject as Node
##--------------------------------------------------Reshape---------------------------------------------------------##
# Calculate the output dimension
def getReshapeOutShape(layer,input_shape):
try:
# Get the layer's reshape param
re_shape = layer.reshape_param.shape.dim
except Exception as e:
re_shape = []
# Calculate the product of all dimensions of input shape
in_prod = 1
for dim in input_shape[0]:
in_prod = in_prod * dim
if re_shape == []:
output_shape = [[1,in_prod]]
else:
output_shape = re_shape
for i in range(len(re_shape)):
if re_shape[i] == 0:
output_shape[i] = input_shape[0][i]
for j in range(len(output_shape)):
if output_shape[j] == -1:
for d in output_shape:
in_prod = in_prod / d
output_shape[j] = int(in_prod * -1)
output_shape = [output_shape]
return output_shape
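# Worked example (illustrative): with input_shape [[1, 256, 13, 13]] and a reshape_param of
# [0, -1], the 0 keeps the corresponding input dimension and the -1 absorbs the remaining
# elements, so the computed output shape is [[1, 43264]] (256 * 13 * 13 = 43264).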
# Build node
def createReshape(layer, nodename, inname, outname, input_shape):
# Get output_shape
output_shape = getReshapeOutShape(layer,input_shape)
# Build node
node = Node.c2oNode(layer, nodename, "Reshape", inname, outname, input_shape, output_shape)
print(nodename, " node construction completed")
return node |
the-stack_106_31582 | '''
MIT License
Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import cv2
import numpy as np
from .glm import ortho
class Camera:
def __init__(self, width=1600, height=1200):
# Focal Length
# equivalent 50mm
focal = np.sqrt(width * width + height * height)
self.focal_x = focal
self.focal_y = focal
# Principal Point Offset
self.principal_x = width / 2
self.principal_y = height / 2
# Axis Skew
self.skew = 0
# Image Size
self.width = width
self.height = height
self.near = 1
self.far = 10
# Camera Center
self.eye = np.array([0, 0, -3.6])
self.center = np.array([0, 0, 0])
self.direction = np.array([0, 0, -1])
self.right = np.array([1, 0, 0])
self.up = np.array([0, 1, 0])
self.ortho_ratio = None
def sanity_check(self):
self.center = self.center.reshape([-1])
self.direction = self.direction.reshape([-1])
self.right = self.right.reshape([-1])
self.up = self.up.reshape([-1])
assert len(self.center) == 3
assert len(self.direction) == 3
assert len(self.right) == 3
assert len(self.up) == 3
@staticmethod
def normalize_vector(v):
v_norm = np.linalg.norm(v)
return v if v_norm == 0 else v / v_norm
def get_real_z_value(self, z):
z_near = self.near
z_far = self.far
z_n = 2.0 * z - 1.0
z_e = 2.0 * z_near * z_far / (z_far + z_near - z_n * (z_far - z_near))
return z_e
def get_rotation_matrix(self):
rot_mat = np.eye(3)
d = self.eye - self.center
d = -self.normalize_vector(d)
u = self.up
self.right = -np.cross(u, d)
u = np.cross(d, self.right)
rot_mat[0, :] = self.right
rot_mat[1, :] = u
rot_mat[2, :] = d
# s = self.right
# s = self.normalize_vector(s)
# rot_mat[0, :] = s
# u = self.up
# u = self.normalize_vector(u)
# rot_mat[1, :] = -u
# rot_mat[2, :] = self.normalize_vector(self.direction)
return rot_mat
def get_translation_vector(self):
rot_mat = self.get_rotation_matrix()
trans = -np.dot(rot_mat.T, self.eye)
return trans
def get_intrinsic_matrix(self):
int_mat = np.eye(3)
int_mat[0, 0] = self.focal_x
int_mat[1, 1] = self.focal_y
int_mat[0, 1] = self.skew
int_mat[0, 2] = self.principal_x
int_mat[1, 2] = self.principal_y
return int_mat
def get_projection_matrix(self):
ext_mat = self.get_extrinsic_matrix()
int_mat = self.get_intrinsic_matrix()
return np.matmul(int_mat, ext_mat)
def get_extrinsic_matrix(self):
rot_mat = self.get_rotation_matrix()
int_mat = self.get_intrinsic_matrix()
trans = self.get_translation_vector()
extrinsic = np.eye(4)
extrinsic[:3, :3] = rot_mat
extrinsic[:3, 3] = trans
return extrinsic[:3, :]
def set_rotation_matrix(self, rot_mat):
self.direction = rot_mat[2, :]
self.up = -rot_mat[1, :]
self.right = rot_mat[0, :]
def set_intrinsic_matrix(self, int_mat):
self.focal_x = int_mat[0, 0]
self.focal_y = int_mat[1, 1]
self.skew = int_mat[0, 1]
self.principal_x = int_mat[0, 2]
self.principal_y = int_mat[1, 2]
def set_projection_matrix(self, proj_mat):
res = cv2.decomposeProjectionMatrix(proj_mat)
int_mat, rot_mat, camera_center_homo = res[0], res[1], res[2]
camera_center = camera_center_homo[0:3] / camera_center_homo[3]
camera_center = camera_center.reshape(-1)
int_mat = int_mat / int_mat[2][2]
self.set_intrinsic_matrix(int_mat)
self.set_rotation_matrix(rot_mat)
self.center = camera_center
self.sanity_check()
def get_gl_matrix(self):
z_near = self.near
z_far = self.far
rot_mat = self.get_rotation_matrix()
int_mat = self.get_intrinsic_matrix()
trans = self.get_translation_vector()
extrinsic = np.eye(4)
extrinsic[:3, :3] = rot_mat
extrinsic[:3, 3] = trans
axis_adj = np.eye(4)
axis_adj[2, 2] = -1
axis_adj[1, 1] = -1
model_view = np.matmul(axis_adj, extrinsic)
projective = np.zeros([4, 4])
projective[:2, :2] = int_mat[:2, :2]
projective[:2, 2:3] = -int_mat[:2, 2:3]
projective[3, 2] = -1
projective[2, 2] = (z_near + z_far)
projective[2, 3] = (z_near * z_far)
if self.ortho_ratio is None:
ndc = ortho(0, self.width, 0, self.height, z_near, z_far)
perspective = np.matmul(ndc, projective)
else:
perspective = ortho(-self.width * self.ortho_ratio / 2, self.width * self.ortho_ratio / 2,
-self.height * self.ortho_ratio / 2, self.height * self.ortho_ratio / 2,
z_near, z_far)
return perspective, model_view
def KRT_from_P(proj_mat, normalize_K=True):
res = cv2.decomposeProjectionMatrix(proj_mat)
K, Rot, camera_center_homog = res[0], res[1], res[2]
camera_center = camera_center_homog[0:3] / camera_center_homog[3]
trans = -Rot.dot(camera_center)
if normalize_K:
K = K / K[2][2]
return K, Rot, trans
def MVP_from_P(proj_mat, width, height, near=0.1, far=10000):
'''
Convert OpenCV camera calibration matrix to OpenGL projection and model view matrix
:param proj_mat: OpenCV camera projeciton matrix
:param width: Image width
:param height: Image height
:param near: Z near value
:param far: Z far value
:return: OpenGL projection matrix and model view matrix
'''
res = cv2.decomposeProjectionMatrix(proj_mat)
K, Rot, camera_center_homog = res[0], res[1], res[2]
camera_center = camera_center_homog[0:3] / camera_center_homog[3]
trans = -Rot.dot(camera_center)
K = K / K[2][2]
extrinsic = np.eye(4)
extrinsic[:3, :3] = Rot
extrinsic[:3, 3:4] = trans
axis_adj = np.eye(4)
axis_adj[2, 2] = -1
axis_adj[1, 1] = -1
model_view = np.matmul(axis_adj, extrinsic)
zFar = far
zNear = near
projective = np.zeros([4, 4])
projective[:2, :2] = K[:2, :2]
projective[:2, 2:3] = -K[:2, 2:3]
projective[3, 2] = -1
projective[2, 2] = (zNear + zFar)
projective[2, 3] = (zNear * zFar)
ndc = ortho(0, width, 0, height, zNear, zFar)
perspective = np.matmul(ndc, projective)
return perspective, model_view
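# Usage sketch (illustrative; assumes this module is imported as part of its package so the
# relative import of `ortho` above resolves):
#   cam = Camera(width=640, height=480)
#   P = cam.get_projection_matrix()                    # 3x4 OpenCV-style projection matrix
#   perspective, model_view = MVP_from_P(P, 640, 480)  # two 4x4 OpenGL matrices
#   perspective2, model_view2 = cam.get_gl_matrix()    # same conversion, computed directly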
|
the-stack_106_31585 | #!/usr/bin/env python3
import pandas as pd
import argparse
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
class MineRegression:
def __init__(self,
system_information=None,
save_csv=False,
save_png=False,
ips=None):
self.df = None
self.ips = ips
self.system_info = system_information
self.save_csv = save_csv
self.save_png = save_png
def generate_csv(self):
results = [pd.read_html('http://%s/html-reports/latest.html' % url, attrs={'id': 'myTable2'})[0] for url in
self.ips]
systems = [pd.read_html('http://%s/html-reports/latest.html' % url, attrs={'id': 'SystemInformation'})[0] for
url in self.ips]
for df in range(0, len(self.ips)):
results[df]['IP'] = self.ips[df]
systems[df]['IP'] = self.ips[df]
dfs = [pd.merge(results[n], systems[n], on='IP') for n in range(len(self.ips))]
self.df = pd.concat(dfs)
self.df = self.df[self.df['STDOUT'] == 'STDOUT']
if self.save_csv:
self.df.to_csv('test_specific_results.csv')
def generate_report(self):
system_variations = self.df[
['IP', 'Python version', 'LANforge version', 'OS Version', 'Hostname',
'Python Environment']].drop_duplicates(
['IP', 'Python version', 'LANforge version', 'OS Version', 'Hostname', 'Python Environment']).reset_index(
drop=True)
errors = list()
lanforge_errors = list()
partial_failures = list()
major_errors = list()
successes = list()
for index in system_variations.index:
variation = system_variations.iloc[index]
system = self.df.loc[
self.df[['Python version', 'LANforge version', 'OS Version', 'Python Environment', 'IP']].isin(
dict(
variation).values()).all(axis=1), :]
result = system.dropna(subset=['STDERR']).shape[0]
errors.append(result)
lanforge_result = system.dropna(subset=['LANforge Error']).shape[0]
partial_failures.append(system[system['Status'] == 'Partial Failure'].shape[0])
major_errors.append(system[system['Status'] == 'ERROR'].shape[0])
lanforge_errors.append(lanforge_result)
successes.append(system[system['Status'] == 'Success'].shape[0])
system_variations['Successes'] = successes
system_variations['Errors'] = errors
system_variations['LANforge errors'] = lanforge_errors
system_variations['Python errors'] = system_variations['Errors'] - system_variations['LANforge errors']
system_variations['Partial Failures'] = partial_failures
system_variations['Major Errors'] = major_errors
if self.save_csv:
system_variations.to_csv('regression_suite_results.csv')
else:
print(system_variations.sort_values('Successes'))
if self.save_png:
now = datetime.datetime.now()
fail = pd.DataFrame(dict(self.df[self.df['Status'] != 'Success']['Command Name'].value_counts()).items())
success = pd.DataFrame(dict(self.df[self.df['Status'] == 'Success']['Command Name'].value_counts()).items())
success['status'] = True
fail['status'] = False
df = pd.concat([success, fail])
fig = px.bar(df, x=0, y=1, color='status', title="%s regression results" % now)
fig.write_image("script_statuses.png", width=1280, height=540)
print('Saved png')
heatmap = self.df
heatmap['Status'] = heatmap['Status'].replace('Success', 2).replace('Failure', -2).replace(
'Partial Failure', 0).replace('ERROR', -1)
heatmap['System'] = heatmap['Hostname'] + '\n' + heatmap['Python Environment']
pivot_df = heatmap.sort_values('Status').drop_duplicates(['Command Name', 'System'])
fig = go.Figure(go.Heatmap(x=pivot_df['Command Name'], z=pivot_df['Status'], y=pivot_df['Hostname']))
fig.update_layout(title="%s regression results" % now)
fig.write_image("script_device_heatmap.png", width=1280, height=540)
print('Created first heatmap')
fig, ax = plt.subplots(1, 1, figsize=(18, 8))
my_colors = [(0.7, 0.3, 0.3), (0.7, 0.5, 0.8), (.9, .9, 0.4), (0.1, 0.6, 0)]
sns.heatmap(pd.pivot_table(pivot_df, values='Status',
index='Command Name', columns='Hostname'),
ax=ax,
cmap=my_colors,
linewidth=0.1,
linecolor=(0.1, 0.2, 0.2))
ax.title.set_text('%s regression results' % now)
colorbar = ax.collections[0].colorbar
colorbar.set_ticks([-1.5, -.5, 0.5, 1.5])
colorbar.set_ticklabels(['ERROR', 'Failure', 'Partial Failure', 'Success'])
plt.savefig('script_device_heatmap_2.png')
print('Created second heatmap')
def main():
parser = argparse.ArgumentParser(description='Compare regression results from different systems')
parser.add_argument('--system_info', help='location of system information csv', default=None)
parser.add_argument('--save_csv', help='save CSV of results', action='store_true')
parser.add_argument('--save_png', help='save PNG of results', action='store_true')
parser.add_argument('--ip', help='IP addresses of LANforge devices you want to probe', action='append')
args = parser.parse_args()
if args.ip is None:
args.ip = ['192.168.92.18', '192.168.92.12', '192.168.93.51', '192.168.92.15', '192.168.100.184',
'192.168.100.30']
Miner = MineRegression(system_information=args.system_info,
save_csv=args.save_csv,
save_png=args.save_png,
ips=args.ip)
Miner.generate_csv()
Miner.generate_report()
if __name__ == '__main__':
main()
|
the-stack_106_31586 | import multiprocessing as mp
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
# from torch.nn.parallel import DistributedDataParallelCPU as DDPC # Deprecated
from rlpyt.utils.quick_args import save__init__args
from rlpyt.utils.collections import namedarraytuple
from rlpyt.utils.synchronize import RWLock
from rlpyt.utils.logging import logger
from rlpyt.utils.tensor import select_at_indexes
from rlpyt.models.utils import strip_ddp_state_dict
AgentInputs = namedarraytuple("AgentInputs",
["observation", "prev_action", "prev_reward"])
AgentStep = namedarraytuple("AgentStep", ["action", "agent_info"])
class OCOptimizerMixin:
"""Mixin class for option critic methods
Option critic methods often have separate learning rates for different components. Specifically, main (3e-4),
termination (5e-7), interest functions (1e-3)
Returns parameters for (1) All components except termination, interest, and policy over options, (2) termination,
(3) policy over options (4) interest
"""
def parameters(self):
beta_ps = [p for n, p in self.model.named_parameters() if 'beta' in n] # Termination
interest_ps = [p for n, p in self.model.named_parameters() if 'interest' in n] # Interest
pi_omega_ps = [p for n, p in self.model.named_parameters() if 'pi_omega' in n] # policy over options
other_ps = [p for n, p in self.model.named_parameters() if ('beta' not in n and 'interest' not in n and 'pi_omega' not in n)]
param_dict = {'main': other_ps, 'beta': beta_ps, 'pi_omega': pi_omega_ps, 'interest': interest_ps}
return param_dict
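# Usage sketch (illustrative, not part of rlpyt): an option-critic algorithm can feed the dict
# returned by parameters() into per-group learning rates, e.g. with the rates quoted in the
# docstring above (exact values and optimizer choice are the algorithm's decision):
#   groups = agent.parameters()
#   optimizer = torch.optim.Adam([
#       {"params": groups["main"], "lr": 3e-4},
#       {"params": groups["beta"], "lr": 5e-7},
#       {"params": groups["pi_omega"], "lr": 3e-4},
#       {"params": groups["interest"], "lr": 1e-3},
#   ])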
class BaseAgent:
"""
The agent performs many functions, including: action-selection during
sampling, returning policy-related values to use in training (e.g. action
probabilities), storing recurrent state during sampling, managing model
device, and performing model parameter communication between processes.
The agent is both interfaces: sampler<-->neural network<-->algorithm.
Typically, each algorithm and environment combination will require at
least some of its own agent functionality.
The base agent automatically carries out some of these roles. It assumes
there is one neural network model. Agents using multiple models might
need to extend certain funcionality to include those models, depending on
how they are used.
"""
recurrent = False
alternating = False
def __init__(self, ModelCls=None, model_kwargs=None, initial_model_state_dict=None):
"""
Arguments are saved but no model initialization occurs.
Args:
ModelCls: The model class to be used.
model_kwargs (optional): Any keyword arguments to pass when instantiating the model.
initial_model_state_dict (optional): Initial model parameter values.
"""
save__init__args(locals())
self.model = None # type: torch.nn.Module
self.shared_model = None
self.distribution = None
self.device = torch.device("cpu")
self._mode = None
if self.model_kwargs is None:
self.model_kwargs = dict()
# The rest only for async operations:
self._rw_lock = RWLock()
self._send_count = mp.RawValue("l", 0)
self._recv_count = 0
def __call__(self, observation, prev_action, prev_reward, device="cpu"):
"""Returns values from model forward pass on training data (i.e. used
in algorithm)."""
raise NotImplementedError
def initialize(self, env_spaces, share_memory=False, **kwargs):
"""
Instantiates the neural net model(s) according to the environment
interfaces.
Uses shared memory as needed--e.g. in CpuSampler, workers have a copy
of the agent for action-selection. The workers automatically hold
up-to-date parameters in ``model``, because they exist in shared
memory, constructed here before worker processes fork. Agents with
additional model components (beyond ``self.model``) for
action-selection should extend this method to share those, as well.
Typically called in the sampler during startup.
Args:
env_spaces: passed to ``make_env_to_model_kwargs()``, typically namedtuple of 'observation' and 'action'.
share_memory (bool): whether to use shared memory for model parameters.
"""
self.env_model_kwargs = self.make_env_to_model_kwargs(env_spaces)
self.model = self.ModelCls(**self.env_model_kwargs,
**self.model_kwargs)
# self.model = torch.jit.script(self.model) # Compile model
if share_memory:
self.model.share_memory()
# Store the shared_model (CPU) under a separate name, in case the
# model gets moved to GPU later:
self.shared_model = self.model
if self.initial_model_state_dict is not None:
self.model.load_state_dict(self.initial_model_state_dict)
self.env_spaces = env_spaces
self.share_memory = share_memory
def make_env_to_model_kwargs(self, env_spaces):
"""Generate any keyword args to the model which depend on environment interfaces."""
return {}
def to_device(self, cuda_idx=None):
"""Moves the model to the specified cuda device, if not ``None``. If
sharing memory, instantiates a new model to preserve the shared (CPU)
model. Agents with additional model components (beyond
``self.model``) for action-selection or for use during training should
extend this method to move those to the device, as well.
Typically called in the runner during startup.
"""
if cuda_idx is None:
return
if self.shared_model is not None:
self.model = self.ModelCls(**self.env_model_kwargs,
**self.model_kwargs)
self.model.load_state_dict(self.shared_model.state_dict())
self.device = torch.device("cuda", index=cuda_idx)
self.model.to(self.device)
logger.log(f"Initialized agent model on device: {self.device}.")
def data_parallel(self):
"""Wraps the model with PyTorch's DistributedDataParallel. The
intention is for rlpyt to create a separate Python process to drive
each GPU (or CPU-group for CPU-only, MPI-like configuration). Agents
with additional model components (beyond ``self.model``) which will
have gradients computed through them should extend this method to wrap
those, as well.
Typically called in the runner during startup.
"""
device_id = self.device.index # None if cpu, else cuda index.
self.model = DDP(
self.model,
device_ids=None if device_id is None else [device_id], # 1 GPU.
output_device=device_id,
)
logger.log("Initialized DistributedDataParallel agent model on "
f"device {self.device}.")
return device_id
def async_cpu(self, share_memory=True):
"""Used in async runner only; creates a new model instance to be used
in the sampler, separate from the model shared with the optimizer
process. The sampler can operate asynchronously, and choose when to
copy the optimizer's (shared) model parameters into its model (under
read-write lock). The sampler model may be stored in shared memory,
as well, to instantly share values with sampler workers. Agents with
additional model components (beyond ``self.model``) should extend this
method to do the same with those, if using in asynchronous mode.
Typically called in the runner during startup.
TODO: double-check wording if this happens in sampler and optimizer."""
if self.device.type != "cpu":
return
assert self.shared_model is not None
self.model = self.ModelCls(**self.env_model_kwargs,
**self.model_kwargs)
# TODO: might need strip_ddp_state_dict.
self.model.load_state_dict(self.shared_model.state_dict())
if share_memory: # Not needed in async_serial.
self.model.share_memory() # For CPU workers in async_cpu.
logger.log("Initialized async CPU agent model.")
def collector_initialize(self, global_B=1, env_ranks=None):
"""If needed to initialize within CPU sampler (e.g. vector epsilon-greedy,
see EpsilonGreedyAgent for details)."""
pass
@torch.no_grad() # Hint: apply this decorator on overriding method.
def step(self, observation, prev_action, prev_reward, device="cpu"):
"""Returns selected actions for environment instances in sampler."""
raise NotImplementedError # return type: AgentStep
def reset(self):
pass
def reset_one(self, idx):
pass
def reset_multiple(self, indexes):
pass
def parameters(self):
"""Parameters to be optimized (overwrite in subclass if multiple models)."""
return self.model.parameters()
def state_dict(self):
"""Returns model parameters for saving."""
return self.model.state_dict()
def load_state_dict(self, state_dict):
"""Load model parameters, should expect format returned from ``state_dict()``."""
self.model.load_state_dict(state_dict)
def train_mode(self, itr):
"""Go into training mode (e.g. see PyTorch's ``Module.train()``)."""
self.model.train()
self._mode = "train"
def sample_mode(self, itr):
"""Go into sampling mode."""
self.model.eval()
self._mode = "sample"
def eval_mode(self, itr):
"""Go into evaluation mode. Example use could be to adjust epsilon-greedy."""
self.model.eval()
self._mode = "eval"
def sync_shared_memory(self):
"""Copies model parameters into shared_model, e.g. to make new values
available to sampler workers. If running CPU-only, these will be the
same object--no copy necessary. If model is on GPU, copy to CPU is
performed. (Requires ``initialize(share_memory=True)`` called
        previously.) Not used in async mode.
Typically called in the XXX during YY.
"""
if self.shared_model is not self.model: # (self.model gets trained)
self.shared_model.load_state_dict(strip_ddp_state_dict(
self.model.state_dict()))
def send_shared_memory(self):
"""Used in async mode only, in optimizer process; copies parameters
from trained model (maybe GPU) to shared model, which the sampler can
access. Does so under write-lock, and increments send-count which sampler
can check.
Typically called in the XXX during YY."""
if self.shared_model is not self.model:
with self._rw_lock.write_lock:
self.shared_model.load_state_dict(
strip_ddp_state_dict(self.model.state_dict()))
self._send_count.value += 1
def recv_shared_memory(self):
"""Used in async mode, in sampler process; copies parameters from
model shared with optimizer into local model, if shared model has been
updated. Does so under read-lock. (Local model might also be shared
with sampler workers).
Typically called in the XXX during YY."""
if self.shared_model is not self.model:
with self._rw_lock:
if self._recv_count < self._send_count.value:
self.model.load_state_dict(self.shared_model.state_dict())
self._recv_count = self._send_count.value
def toggle_alt(self):
pass # Only needed for recurrent alternating agent, but might get called.
AgentInputsRnn = namedarraytuple("AgentInputsRnn", # Training only.
["observation", "prev_action", "prev_reward", "init_rnn_state"])
class RecurrentAgentMixin:
"""
Mixin class to manage recurrent state during sampling (so the sampler
remains agnostic). To be used like ``class
MyRecurrentAgent(RecurrentAgentMixin, MyAgent):``.
"""
recurrent = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prev_rnn_state = None
self._sample_rnn_state = None # Store during eval.
def reset(self):
"""Sets the recurrent state to ``None``, which built-in PyTorch
        modules convert to zeros."""
self._prev_rnn_state = None
def reset_one(self, idx):
"""Sets the recurrent state corresponding to one environment instance
to zero. Assumes rnn state is in cudnn-compatible shape: [N,B,H],
where B corresponds to environment index."""
if self._prev_rnn_state is not None:
self._prev_rnn_state[:, idx] = 0 # Automatic recursion in namedarraytuple.
def reset_multiple(self, indexes):
self.reset_one(indexes) # Should be same if I pass proper indices
def advance_rnn_state(self, new_rnn_state):
"""Sets the recurrent state to the newly computed one (i.e. recurrent agents should
call this at the end of their ``step()``). """
self._prev_rnn_state = new_rnn_state
@property
def prev_rnn_state(self):
return self._prev_rnn_state
def train_mode(self, itr):
"""If coming from sample mode, store the rnn state elsewhere and clear it."""
if self._mode == "sample":
self._sample_rnn_state = self._prev_rnn_state
self._prev_rnn_state = None
super().train_mode(itr)
def sample_mode(self, itr):
"""If coming from non-sample modes, restore the last sample-mode rnn state."""
if self._mode != "sample":
self._prev_rnn_state = self._sample_rnn_state
super().sample_mode(itr)
def eval_mode(self, itr):
"""If coming from sample mode, store the rnn state elsewhere and clear it."""
if self._mode == "sample":
self._sample_rnn_state = self._prev_rnn_state
self._prev_rnn_state = None
super().eval_mode(itr)
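# Illustrative usage sketch (class and attribute names other than the mixin are assumptions,
# not part of this file): the mixin is listed before the concrete agent so its mode/reset
# handling wraps the base behavior, and step() stores the new rnn state for the next call:
#
#     class MyRecurrentAgent(RecurrentAgentMixin, MyAgent):
#         def step(self, observation, prev_action, prev_reward):
#             pi, value, rnn_state = self.model(observation, prev_action,
#                                               prev_reward, self.prev_rnn_state)
#             action = self.distribution.sample(pi)
#             self.advance_rnn_state(rnn_state)  # keep state for the next sampler step
#             return action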
AgentInputsOC = namedarraytuple("AgentInputsOC", # Training only.
["observation", "prev_action", "prev_reward", "sampled_option"])
AgentInputsOCRnn = namedarraytuple("AgentInputsOCRnn", # Training only.
["observation", "prev_action", "prev_reward", "sampled_option", "init_rnn_state"])
class OCAgentMixin:
"""
Mixin class to manage option state during sampling (so the sampler
remains agnostic). To be used like ``class
MyOCAgent(OCAgentMixin, MyAgent):``.
"""
recurrent = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prev_rnn_state = None
self._sample_rnn_state = None # Store during eval.
self._prev_option = None
self._sample_prev_option = None # Store during eval.
assert 'option_size' in self.model_kwargs
self.n_opt = self.model_kwargs['option_size']
def reset(self):
"""Sets the previous option to ``None``"""
self._prev_option = None
self._prev_rnn_state = None
def reset_one(self, idx):
"""Sets the previous option corresponding to one environment instance
to -1. Assumes prev_option is in cudnn-compatible shape: [B],
where B corresponds to environment index."""
if self._prev_option is not None:
self._prev_option[idx] = -1 # Automatic recursion in namedarraytuple.
if self._prev_rnn_state is not None:
self._prev_rnn_state[:, idx] = 0 # Automatic recursion in namedarraytuple.
def reset_multiple(self, indexes):
self.reset_one(indexes) # Should be same if I pass proper indices
def sample_option(self, betas, option_dist_info):
"""Sample options according to which previous options are terminated and probability over options"""
if self._prev_option is None: # No previous option, store as -1
self._prev_option = torch.full(betas.size()[:-1], -1, dtype=torch.long, device=betas.device)
terminations = select_at_indexes(self._prev_option, torch.bernoulli(betas).bool())
options = self._prev_option.clone()
new_o = self.distribution_omega.sample(option_dist_info).expand_as(self._prev_option)
options[self._prev_option == -1] = new_o[self._prev_option == -1] # Must terminate, episode reset
mask = self._prev_option != -1
options[mask] = torch.where(terminations.view(-1)[mask].flatten(), new_o[mask], self._prev_option[mask])
return options, terminations
def advance_oc_state(self, new_option):
"""Sets the previous option to the newly computed one (i.e. option-critic agents should
call this at the end of their ``step()``). """
self._prev_option = new_option
def advance_rnn_state(self, new_rnn_state):
"""Sets the recurrent state to the newly computed one (i.e. recurrent agents should
call this at the end of their ``step()``). """
self._prev_rnn_state = new_rnn_state
@property
def prev_option(self):
return self._prev_option
@property
def prev_rnn_state(self):
return self._prev_rnn_state
def train_mode(self, itr):
"""If coming from sample mode, store the previous option elsewhere and clear it."""
if self._mode == "sample":
self._sample_prev_option = self._prev_option
self._sample_rnn_state = self._prev_rnn_state
self._prev_option = None
self._prev_rnn_state = None
super().train_mode(itr)
def sample_mode(self, itr):
"""If coming from non-sample modes, restore the last sample-mode rnn state."""
if self._mode != "sample":
self._prev_option = self._sample_prev_option
self._prev_rnn_state = self._sample_rnn_state
super().sample_mode(itr)
def eval_mode(self, itr):
"""If coming from sample mode, store the rnn state elsewhere and clear it."""
if self._mode == "sample":
self._sample_prev_option = self._prev_option
self._sample_rnn_state = self._prev_rnn_state
self._prev_option = None
self._prev_rnn_state = None
super().eval_mode(itr)
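# Worked example of sample_option() above (the numbers are illustrative): with two
# environments and prev_option = [-1, 1], betas holds a termination probability per option.
# Environment 0 has no previous option (fresh episode), so it always draws a new option from
# option_dist_info; environment 1 keeps option 1 unless its sampled termination comes up True,
# in which case it also draws a new option. The agent is expected to call advance_oc_state()
# with the returned options at the end of its step().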
class RecurrentOCAgentMixin(OCAgentMixin):
recurrent = True
class AlternatingRecurrentAgentMixin:
"""
Maintain an alternating pair of recurrent states to use when stepping in
the sampler. Automatically swap them out when ``advance_rnn_state()`` is
called, so it otherwise behaves like regular recurrent agent. Should use
only in alternating samplers, where two sets of environment instances take
turns stepping (no special class needed for feedforward agents). Use in
place of ``RecurrentAgentMixin``.
"""
recurrent = True
alternating = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._alt = 0
self._prev_rnn_state = None
self._prev_rnn_state_pair = [None, None]
self._sample_rnn_state_pair = [None, None]
def reset(self):
self._prev_rnn_state_pair = [None, None]
self._prev_rnn_state = None
self._alt = 0
# Leave _sample_rnn_state_pair alone.
def reset_one(self, idx):
"""Sets the recurrent state corresponding to one environment instance
to zero. Assumes rnn state is in cudnn-compatible shape: [N,B,H],
where B corresponds to environment index."""
if self._prev_rnn_state is not None:
self._prev_rnn_state[:, idx] = 0 # Automatic recursion in namedarraytuple.
def reset_multiple(self, indexes):
self.reset_one(indexes) # Should be same if I pass proper indices
def advance_rnn_state(self, new_rnn_state):
"""To be called inside agent.step()."""
self._prev_rnn_state_pair[self._alt] = new_rnn_state
self._alt ^= 1
self._prev_rnn_state = self._prev_rnn_state_pair[self._alt]
@property
def prev_rnn_state(self):
return self._prev_rnn_state
def train_mode(self, itr):
if self._mode == "sample":
self._sample_rnn_state_pair = self._prev_rnn_state_pair
self._prev_rnn_state_pair = [None, None]
self._prev_rnn_state = None
self._alt = 0
super().train_mode(itr)
def sample_mode(self, itr):
if self._mode != "sample":
self._prev_rnn_state_pair = self._sample_rnn_state_pair
self._alt = 0
self._prev_rnn_state = self._prev_rnn_state_pair[0]
super().sample_mode(itr)
def eval_mode(self, itr):
if self._mode == "sample":
self._sample_rnn_state_pair = self._prev_rnn_state_pair
self._prev_rnn_state_pair = [None, None]
self._prev_rnn_state = None
self._alt = 0
super().eval_mode(itr)
def get_alt(self):
return self._alt
def toggle_alt(self):
self._alt ^= 1
self._prev_rnn_state = self._prev_rnn_state_pair[self._alt]
class AlternatingOCAgentMixin:
"""
Maintain alternating pairs of previous options (and, for the recurrent variant,
recurrent states) to use when stepping in the sampler. The pairs are swapped out
automatically when ``advance_oc_state()`` / ``advance_rnn_state()`` are called, so
the agent otherwise behaves like a regular option-critic agent. Should be used
only in alternating samplers, where two sets of environment instances take turns
stepping (no special class needed for feedforward agents). Use in place of
``OCAgentMixin``.
"""
recurrent = False
alternating = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._alt = 0
self._alt_o = 0
self._prev_rnn_state = None
self._prev_rnn_state_pair = [None, None]
self._sample_rnn_state_pair = [None, None]
self._prev_option = None
self._prev_option_pair = [None, None]
self._sample_option_pair = [None, None]
assert 'option_size' in self.model_kwargs
self.n_opt = self.model_kwargs['option_size']
def reset(self):
self._prev_rnn_state_pair = [None, None]
self._prev_rnn_state = None
self._prev_option = None
self._prev_option_pair = [None, None]
self._alt = 0
self._alt_o = 0
# Leave _sample_rnn_state_pair alone.
def reset_one(self, idx):
"""Sets the previous option corresponding to one environment instance
to -1. Assumes prev_option is in cudnn-compatible shape: [B],
where B corresponds to environment index."""
if self._prev_option is not None:
self._prev_option[idx] = -1 # Automatic recursion in namedarraytuple.
if self._prev_rnn_state is not None:
self._prev_rnn_state[:, idx] = 0 # Automatic recursion in namedarraytuple.
def reset_multiple(self, indexes):
self.reset_one(indexes) # Should be same if I pass proper indices
def advance_rnn_state(self, new_rnn_state):
"""To be called inside agent.step()."""
self._prev_rnn_state_pair[self._alt] = new_rnn_state
self._alt ^= 1
self._prev_rnn_state = self._prev_rnn_state_pair[self._alt]
def advance_oc_state(self, new_option):
"""Sets the previous option to the newly computed one (i.e. option-critic agents should
call this at the end of their ``step()``). """
self._prev_option_pair[self._alt_o] = new_option
self._alt_o ^= 1
self._prev_option = self._prev_option_pair[self._alt_o]
def sample_option(self, betas, option_dist_info):
"""Sample options according to which previous options are terminated and probability over options"""
# betas = betas.view(-1, self.n_opt)
if self._prev_option is None: # No previous option, store as -1
self._prev_option = torch.full(betas.size()[:-1], -1, dtype=torch.long, device=betas.device)
terminations = select_at_indexes(self._prev_option, torch.bernoulli(betas).bool())
options = self._prev_option.clone()
new_o = self.distribution_omega.sample(option_dist_info).expand_as(self._prev_option)
options[self._prev_option == -1] = new_o[self._prev_option == -1] # Must terminate, episode reset
mask = self._prev_option != -1
options[mask] = torch.where(terminations.view(-1)[mask].flatten(), new_o[mask], self._prev_option[mask])
return options, terminations
@property
def prev_rnn_state(self):
return self._prev_rnn_state
@property
def prev_option(self):
return self._prev_option
def train_mode(self, itr):
if self._mode == "sample":
self._sample_rnn_state_pair = self._prev_rnn_state_pair
self._sample_option_pair = self._prev_option_pair
self._prev_rnn_state_pair = [None, None]
self._prev_rnn_state = None
self._alt = 0
self._prev_option_pair = [None, None]
self._prev_option = None
self._alt_o = 0
super().train_mode(itr)
def sample_mode(self, itr):
if self._mode != "sample":
self._prev_rnn_state_pair = self._sample_rnn_state_pair
self._alt = 0
self._prev_rnn_state = self._prev_rnn_state_pair[0]
self._prev_option_pair = self._sample_option_pair
self._alt_o = 0
self._prev_option = self._prev_option_pair[0]
super().sample_mode(itr)
def eval_mode(self, itr):
if self._mode == "sample":
self._sample_rnn_state_pair = self._prev_rnn_state_pair
self._sample_option_pair = self._prev_option_pair
self._prev_rnn_state_pair = [None, None]
self._prev_rnn_state = None
self._alt = 0
self._prev_option_pair = [None, None]
self._prev_option = None
self._alt_o = 0
super().eval_mode(itr)
def get_alt(self):
return self._alt_o
def toggle_alt(self):
self._alt ^= 1
self._alt_o ^= 1
self._prev_rnn_state = self._prev_rnn_state_pair[self._alt]
self._prev_option = self._prev_option_pair[self._alt_o]
class AlternatingRecurrentOCAgentMixin(AlternatingOCAgentMixin):
recurrent = True
|
the-stack_106_31587 | import numpy as np
class Geom:
"""
A class for operations regarding simulation geometry and configuration.
Attributes
----------
method : string, either 'random' or 'file'
Method of generating initial state.
**kwargs : See Below
Keyword Arguments
-----------------
file_name : string
Name of file used to generate initial state.
num_particles : integer
Number of particles to generate.
box_length : integer or float
Length of box to generate.
Methods
-------
generate_initial_state :
Generate initial coordinates of particles in a box either randomly or based on a file.
minimum_image_distance :
Calculate minimum image distance between two particles, i and j.
wrap :
Wrap a vector back to periodic box.
save_state :
Save current simulation state into a txt file. First line is box dimension, second line is number of particles, and the rest are particle coordinates.
"""
def __init__(self, method, **kwargs):
"""
The constructor for Geom class.
Parameters
----------
method : string, either 'random' or 'file'
Method of generating initial state.
**kwargs : See Below
Keyword Arguments
-----------------
file_name : string
Name of file used to generate initial state.
num_particles : integer
Number of particles to generate.
box_length : integer or float
Length of box to generate.
"""
self.generate_initial_state(method, **kwargs)
def generate_initial_state(self, method, **kwargs):
"""
Generate initial coordinates of particles in a box either randomly or based on a file.
Parameters
----------
method : string, either 'random' or 'file'
Method of generating initial state.
file_name : string
Name of file used to generate initial state.
num_particles : integer
Number of particles to generate.
box_length : integer or float
Length of box to generate.
Returns
-------
coordinates : array
Array of particle coordinates generated for an initial state
"""
if method == 'random':
if (kwargs.get('num_particles') is None or kwargs.get('reduced_den') is None):
raise ValueError('"num_particles" and "reduced_den" arguments must be set for method="random"!')
self.num_particles = kwargs['num_particles']
self.box_length = np.cbrt(self.num_particles / kwargs['reduced_den'])
self.volume = self.box_length**3
self.coordinates = (0.5 - np.random.rand(self.num_particles, 3)) * self.box_length
elif method == 'file':
if (kwargs.get('file_name') is None):
raise ValueError('"file_name" argument must be set for method="file"!')
file_name = kwargs['file_name']
with open(file_name) as f:
lines = f.readlines()
self.box_length = np.fromstring(lines[0], dtype=float, sep=',')[0]
self.volume = self.box_length**3
self.num_particles = np.fromstring(lines[1], dtype=float, sep=',')[0]
self.coordinates = np.loadtxt(file_name, skiprows=2, usecols=(1, 2, 3))
if (self.num_particles != self.coordinates.shape[0]):
raise ValueError('Inconsistent value of number of particles in file!')
else:
raise TypeError('Method type not recognized.')
def minimum_image_distance(self, r_i, coords):
"""
Calculate minimum image distance between two particles, i and j.
Parameters
----------
r_i : array
Coordinates of particle i.
coords : array
Coordinates of particle j, or an array of positions.
Returns
-------
rij2 : float or array
Square of the minimum image distance between particle i and j, or between particle i and each position in coords.
"""
rij = r_i - coords
rij = rij - self.box_length * np.round(rij / self.box_length)
rij2 = np.sum(rij**2, axis=-1)
return rij2
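# Worked example (illustrative numbers): with box_length = 10.0, a particle at
# r_i = [0, 0, 0] and another at [9, 0, 0] give rij = [-9, 0, 0];
# np.round(-9 / 10) = -1, so the wrapped separation becomes [1, 0, 0] and
# rij2 = 1.0 -- the nearest periodic image is used instead of the raw in-box
# squared distance of 81.0.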
def wrap(self, v):
"""
Wrap a vector back to periodic box.
Parameters
----------
v : the vector to be wrapped
Returns
-------
wrapped_v : the vector wrapped back to simulation box
"""
wrapped_v = v - self.box_length * np.round(v / self.box_length)
return wrapped_v
def get_particle_coordinates(self):
"""
Get the coordinates of all particles in the system.
Parameters
----------
None
Returns
-------
coordinates : array
Array of particle coordinates, shape (num_particles, 3).
"""
return self.coordinates
def save_state(self, file_name):
"""
Save current simulation state into a txt file. First line is box dimension, second line is number of particles, and the rest are particle coordinates
Parameters
----------
file_name : string
Name of the file to write; an error is raised if it already exists.
Returns
-------
None
"""
import os.path
if (os.path.exists(file_name)):
raise ValueError("File already exists!")
f = open(file_name, "w+")
f.write("%.18e %.18e %.18e\n" % (self.box_length, self.box_length, self.box_length))
f.write("%d\n" % (self.num_particles))
f.close()
f = open(file_name, 'ab')
np.savetxt(f, self.coordinates)
f.close()
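# Minimal usage sketch (illustrative, not part of the original module):
#
#     geom = Geom('random', num_particles=125, reduced_den=0.8)
#     r0 = geom.get_particle_coordinates()[0]
#     d2 = geom.minimum_image_distance(r0, geom.get_particle_coordinates())
#     geom.save_state('initial_state.txt')
#
# The same configuration can later be rebuilt with
# Geom('file', file_name='initial_state.txt').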
|
the-stack_106_31590 | """Integration with the Rachio Iro sprinkler system controller."""
from abc import abstractmethod
from datetime import timedelta
import logging
from homeassistant.components.switch import SwitchDevice
from homeassistant.helpers.dispatcher import dispatcher_connect
from . import (
CONF_MANUAL_RUN_MINS,
DOMAIN as DOMAIN_RACHIO,
KEY_DEVICE_ID,
KEY_ENABLED,
KEY_ID,
KEY_NAME,
KEY_ON,
KEY_SUBTYPE,
KEY_SUMMARY,
KEY_ZONE_ID,
KEY_ZONE_NUMBER,
SIGNAL_RACHIO_CONTROLLER_UPDATE,
SIGNAL_RACHIO_ZONE_UPDATE,
SUBTYPE_SLEEP_MODE_OFF,
SUBTYPE_SLEEP_MODE_ON,
SUBTYPE_ZONE_COMPLETED,
SUBTYPE_ZONE_STARTED,
SUBTYPE_ZONE_STOPPED,
)
_LOGGER = logging.getLogger(__name__)
ATTR_ZONE_SUMMARY = "Summary"
ATTR_ZONE_NUMBER = "Zone number"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Rachio switches."""
manual_run_time = timedelta(
minutes=hass.data[DOMAIN_RACHIO].config.get(CONF_MANUAL_RUN_MINS)
)
_LOGGER.info("Rachio run time is %s", str(manual_run_time))
# Add all zones from all controllers as switches
devices = []
for controller in hass.data[DOMAIN_RACHIO].controllers:
devices.append(RachioStandbySwitch(hass, controller))
for zone in controller.list_zones():
devices.append(RachioZone(hass, controller, zone, manual_run_time))
add_entities(devices)
_LOGGER.info("%d Rachio switch(es) added", len(devices))
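# Illustrative configuration sketch (the YAML keys are assumptions based on the constants
# imported above and are not verified against the component's CONFIG_SCHEMA):
#
#     rachio:
#       api_key: !secret rachio_api_key
#       manual_run_mins: 10
#
# CONF_MANUAL_RUN_MINS controls how long a zone runs when its switch is turned on
# from Home Assistant.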
class RachioSwitch(SwitchDevice):
"""Represent a Rachio state that can be toggled."""
def __init__(self, controller, poll=True):
"""Initialize a new Rachio switch."""
self._controller = controller
if poll:
self._state = self._poll_update()
else:
self._state = None
@property
def should_poll(self) -> bool:
"""Declare that this entity pushes its state to HA."""
return False
@property
def name(self) -> str:
"""Get a name for this switch."""
return "Switch on {}".format(self._controller.name)
@property
def is_on(self) -> bool:
"""Return whether the switch is currently on."""
return self._state
@abstractmethod
def _poll_update(self, data=None) -> bool:
"""Poll the API."""
pass
def _handle_any_update(self, *args, **kwargs) -> None:
"""Determine whether an update event applies to this device."""
if args[0][KEY_DEVICE_ID] != self._controller.controller_id:
# For another device
return
# For this device
self._handle_update(*args, **kwargs)
@abstractmethod
def _handle_update(self, *args, **kwargs) -> None:
"""Handle incoming webhook data."""
pass
class RachioStandbySwitch(RachioSwitch):
"""Representation of a standby status/button."""
def __init__(self, hass, controller):
"""Instantiate a new Rachio standby mode switch."""
dispatcher_connect(
hass, SIGNAL_RACHIO_CONTROLLER_UPDATE, self._handle_any_update
)
super().__init__(controller, poll=False)
self._poll_update(controller.init_data)
@property
def name(self) -> str:
"""Return the name of the standby switch."""
return "{} in standby mode".format(self._controller.name)
@property
def unique_id(self) -> str:
"""Return a unique id by combinining controller id and purpose."""
return "{}-standby".format(self._controller.controller_id)
@property
def icon(self) -> str:
"""Return an icon for the standby switch."""
return "mdi:power"
def _poll_update(self, data=None) -> bool:
"""Request the state from the API."""
if data is None:
data = self._controller.rachio.device.get(self._controller.controller_id)[1]
return not data[KEY_ON]
def _handle_update(self, *args, **kwargs) -> None:
"""Update the state using webhook data."""
if args[0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_ON:
self._state = True
elif args[0][KEY_SUBTYPE] == SUBTYPE_SLEEP_MODE_OFF:
self._state = False
self.schedule_update_ha_state()
def turn_on(self, **kwargs) -> None:
"""Put the controller in standby mode."""
self._controller.rachio.device.off(self._controller.controller_id)
def turn_off(self, **kwargs) -> None:
"""Resume controller functionality."""
self._controller.rachio.device.on(self._controller.controller_id)
class RachioZone(RachioSwitch):
"""Representation of one zone of sprinklers connected to the Rachio Iro."""
def __init__(self, hass, controller, data, manual_run_time):
"""Initialize a new Rachio Zone."""
self._id = data[KEY_ID]
self._zone_name = data[KEY_NAME]
self._zone_number = data[KEY_ZONE_NUMBER]
self._zone_enabled = data[KEY_ENABLED]
self._manual_run_time = manual_run_time
self._summary = str()
super().__init__(controller)
# Listen for all zone updates
dispatcher_connect(hass, SIGNAL_RACHIO_ZONE_UPDATE, self._handle_update)
def __str__(self):
"""Display the zone as a string."""
return 'Rachio Zone "{}" on {}'.format(self.name, str(self._controller))
@property
def zone_id(self) -> str:
"""How the Rachio API refers to the zone."""
return self._id
@property
def name(self) -> str:
"""Return the friendly name of the zone."""
return self._zone_name
@property
def unique_id(self) -> str:
"""Return a unique id by combinining controller id and zone number."""
return "{}-zone-{}".format(self._controller.controller_id, self.zone_id)
@property
def icon(self) -> str:
"""Return the icon to display."""
return "mdi:water"
@property
def zone_is_enabled(self) -> bool:
"""Return whether the zone is allowed to run."""
return self._zone_enabled
@property
def state_attributes(self) -> dict:
"""Return the optional state attributes."""
return {ATTR_ZONE_NUMBER: self._zone_number, ATTR_ZONE_SUMMARY: self._summary}
def turn_on(self, **kwargs) -> None:
"""Start watering this zone."""
# Stop other zones first
self.turn_off()
# Start this zone
self._controller.rachio.zone.start(self.zone_id, self._manual_run_time.seconds)
_LOGGER.debug("Watering %s on %s", self.name, self._controller.name)
def turn_off(self, **kwargs) -> None:
"""Stop watering all zones."""
self._controller.stop_watering()
def _poll_update(self, data=None) -> bool:
"""Poll the API to check whether the zone is running."""
schedule = self._controller.current_schedule
return self.zone_id == schedule.get(KEY_ZONE_ID)
def _handle_update(self, *args, **kwargs) -> None:
"""Handle incoming webhook zone data."""
if args[0][KEY_ZONE_ID] != self.zone_id:
return
self._summary = kwargs.get(KEY_SUMMARY, str())
if args[0][KEY_SUBTYPE] == SUBTYPE_ZONE_STARTED:
self._state = True
elif args[0][KEY_SUBTYPE] in [SUBTYPE_ZONE_STOPPED, SUBTYPE_ZONE_COMPLETED]:
self._state = False
self.schedule_update_ha_state()
|
the-stack_106_31592 | from dataclasses import dataclass, field
from typing import Any, Dict, Optional
import torch
from toolbox.callbacks.callback_base import CallBack
from toolbox.utils.progress_bar import ProgressBar
@dataclass
class ProgressBarCB(CallBack):
progress_bar_size: int
_progress_bar: Optional[ProgressBar] = field(init=False, default=None)
_curr_epoch: int = field(init=False, default=0)
def on_train_epoch_begin(self, curr_epoch: int, total_epochs: int) -> None:
self._curr_epoch = curr_epoch
self._progress_bar = ProgressBar(self.progress_bar_size,
' loss: %.06f, batch: %d, epoch: %d')
def on_train_batch_end(self, curr_step: int, total_steps: int,
loss: torch.Tensor) -> None:
if self._progress_bar is not None:
self._progress_bar.progress(curr_step / total_steps * 100, loss,
self._curr_epoch)
def on_train_epoch_end(self, curr_epoch: int, total_epochs: int) -> None:
# Print a new line at the end of each epoch
print()
def serialize(self):
serialized = super().serialize()
serialized['progress_bar_size'] = self.progress_bar_size
return serialized
@staticmethod
def deserialize(state: Dict[str, Any],
strict: bool = False) -> 'ProgressBarCB':
assert 'progress_bar_size' in state
progress_bar_cb = ProgressBarCB(state['progress_bar_size'])
return progress_bar_cb
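# Illustrative usage sketch (the surrounding trainer objects are assumptions, not part of
# this module): a training loop would invoke the callback hooks roughly as follows:
#
#     cb = ProgressBarCB(progress_bar_size=40)
#     for epoch in range(num_epochs):
#         cb.on_train_epoch_begin(epoch, num_epochs)
#         for step, batch in enumerate(loader):
#             loss = train_step(batch)
#             cb.on_train_batch_end(step, len(loader), loss)
#         cb.on_train_epoch_end(epoch, num_epochs)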
|
the-stack_106_31593 | import pytest
import json
from web3 import EthereumTesterProvider, Web3
from eth_tester import EthereumTester, PyEVMBackend
import eth_tester.backends.pyevm.main as py_evm_main
from contracts.style_art import StyleArt
py_evm_main.GENESIS_GAS_LIMIT = 10000000
PRIVATE_KEY = "0x0000000000000000000000000000000000000000000000000000000000000001"
"""
To find private key of test account_keys:
pyevm_backend = PyEVMBackend()
for pk in pyevm_backend.accounts:
print(pk)
"""
@pytest.fixture
def tester_provider():
return EthereumTesterProvider(EthereumTester(PyEVMBackend()))
@pytest.fixture
def eth_tester(tester_provider):
return tester_provider.ethereum_tester
@pytest.fixture
def w3(tester_provider):
w3 = Web3(tester_provider)
return w3
@pytest.fixture
def contract(eth_tester, w3):
deploy_address = eth_tester.get_accounts()[0]
with open("contracts/StyleArt.json") as f:
contract = json.load(f)
abi = contract["abi"]
bytecode = contract["bytecode"]
StyleArtContract = w3.eth.contract(abi=abi, bytecode=bytecode)
tx_hash = StyleArtContract.constructor().transact({
"from": deploy_address,
"gas": 10000000
})
# wait for the transaction to be mined
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash, 180)
# instantiate and return an instance of our contract.
contract = w3.eth.contract(abi=abi, address=tx_receipt.contractAddress)
style_art = StyleArt(contract, w3, deploy_address, PRIVATE_KEY)
return style_art
def test_contract_creation(contract):
assert type(contract) is StyleArt
def test_total_supply(contract):
assert contract.total_supply() == 0
def test_balance_of(eth_tester, contract):
owner = eth_tester.get_accounts()[0]
contract.mint_nft(owner, "token_uri")
assert contract.balance_of(owner) == 1
def test_minting_nft(eth_tester, contract):
owner = eth_tester.get_accounts()[0]
address = eth_tester.get_accounts()[-1]
token_id = contract.mint_nft(address, "token_uri")
assert token_id == 1
assert contract.balance_of(address) == 1
assert contract.balance_of(owner) == 0
|
the-stack_106_31594 | import argparse
import os
import os.path as osp
from collections import defaultdict
import mmcv
from tqdm import tqdm
CLASSES = [
'pedestrian', 'rider', 'car', 'bus', 'truck', 'bicycle', 'motorcycle',
'train'
]
USELESS = ['traffic light', 'traffic sign']
IGNORES = ['trailer', 'other person', 'other vehicle']
def parse_args():
parser = argparse.ArgumentParser(
description='Convert BDD100K detection label to COCO format')
parser.add_argument('-i', '--input', help='path of BDD label json file')
parser.add_argument(
'-o', '--output', help='path to save coco formatted label file')
return parser.parse_args()
def main():
args = parse_args()
if not os.path.exists(args.output):
os.makedirs(args.output)
for subset in ['train', 'val']:
print(f'convert BDD100K detection {subset} set into coco format')
bdd = mmcv.load(osp.join(args.input, f'det_v2_{subset}_release.json'))
coco = defaultdict(list)
for cls_id, cls in enumerate(CLASSES, 1):
coco['categories'].append(dict(id=cls_id, name=cls))
ann_id = 0
for img_id, img_info in enumerate(tqdm(bdd)):
img = dict(
file_name=img_info['name'],
height=720,
width=1280,
id=img_id,
metas=img_info['attributes'])
coco['images'].append(img)
if img_info['labels'] is None:
continue
for k, ann_info in enumerate(img_info['labels']):
if ann_info['category'] in CLASSES:
cls_id = CLASSES.index(ann_info['category']) + 1
elif ann_info['category'] in USELESS or ann_info[
'category'] in IGNORES:
continue
else:
raise ValueError('Category do not exist.')
x1 = ann_info['box2d']['x1']
x2 = ann_info['box2d']['x2']
y1 = ann_info['box2d']['y1']
y2 = ann_info['box2d']['y2']
area = float((x2 - x1) * (y2 - y1))
ann = dict(
id=ann_id,
image_id=img_id,
category_id=cls_id,
bbox=[x1, y1, x2 - x1, y2 - y1],
area=area,
occluded=ann_info['attributes']['occluded'],
truncated=ann_info['attributes']['truncated'])
coco['annotations'].append(ann)
ann_id += 1
mmcv.dump(
coco, osp.join(args.output,
f'bdd100k_det_{subset}_cocoformat.json'))
print('converted {} images with {} objects'.format(
len(coco['images']), len(coco['annotations'])))
if __name__ == '__main__':
main()
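# Example invocation (paths and the script name are illustrative):
#
#     python bdd100k2coco.py -i /data/bdd100k/labels -o /data/bdd100k/coco_labels
#
# The input directory is expected to contain det_v2_train_release.json and
# det_v2_val_release.json; the converted files are written to the output directory as
# bdd100k_det_{train,val}_cocoformat.json.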
|
the-stack_106_31595 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : Visualate #
# Version : 0.1.0 #
# File : canvas.py #
# Python : 3.8.1 #
# --------------------------------------------------------------------------- #
# Author : John James #
# Company : DecisionScients #
# Email : [email protected] #
# URL : https://github.com/decisionscients/datastudio #
# --------------------------------------------------------------------------- #
# Created : Wednesday, March 4th 2020, 12:03:15 pm #
# Last Modified : Wednesday, March 4th 2020, 12:03:30 pm #
# Modified By : John James ([email protected]>) #
# --------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2020 DecisionScients #
# =========================================================================== #
"""Classes that manage plot 'canvas' configurations.
The configurable options for a plot are copious, and it would be burdensome to
include these configuration options in each plot class. This would lead
to bloated interfaces that are difficult to read, let alone maintain.
The Canvas module allows users to specify configurable options separate from
the instantiation and rendering of plots. The Canvas object, containing all
plot configuration options, is passed into the constructors of the individual
plot classes. This allows users the flexibility of defining the 'canvas'
while reducing the burdon on the plot classes.
The module is comprised of the following classes.
Container
---------
* Canvas : Container class for Canvas component classes
Components
----------
* CanvasComponent : Abstract base class for the following classes
* CanvasTitle : Sets font and position of the plot title
* CanvasLegend : Sets style, font, position and behavior of the legend
* CanvasMargins : Sets plot margins
* CanvasSize : Sets plot width and height
* CanvasFont : Sets family, size and color of fonts
* CanvasColorBackground : Sets plot and page background colors
* CanvasColorScale : Sets sequential, divergent and colorway scales
* CanvasColorAxisDomain : Sets min, max, and mid values of the color scales
* CanvasColorAxisScales : Sets color scale
* CanvasColorAxisBarStyle : Sets color axis bar thickness, length and color
* CanvasColorAxisBarPosition : Sets the position of the color axis color bar
* CanvasColorAxisBarBoundary : Sets color axis border and outline color and width
* CanvasColorAxisBarTicks : Sets parameters for ticks
* CanvasColorAxisBarTickStyle : Sets the style of the ticks
* CanvasColorAxisBarTickFont : Sets the font of the ticks.
* CanvasColorAxisBarNumbers : Set number format
* CanvasColorAxisBarTitle : Sets the axis bar title family, size and color.
"""
import os
import time
import numpy as np
import pandas as pd
from abc import ABC, abstractmethod, ABCMeta
# --------------------------------------------------------------------------- #
# Canvas #
# --------------------------------------------------------------------------- #
class Canvas():
"""A container class holding the various Canvas components.
A Canvas is a collection of CanvasComponents, each of which contains
a set of related visualization configuration parameters. Each
CanvasComponent class exposes its parameters are accessor properties. At
instantiation, the parameters are set to their default values.
"""
def __init__(self):
self.__components = {}
def print_components(self):
"""Prints component parameters and values to sysout."""
if bool(self.__components):
for component in self.__components.values():
component.print_parameters()
else:
print("The Canvas object has no parameters.")
def add_component(self, component):
"""Adds a CanvasComponent object to the collection.
Parameters
----------
component : CanvasComponent object
Class containing a group of parameters and accessors.
Returns
-------
self
"""
component_name = component.__class__.__name__
if component_name in self.__components:
raise Exception("CanvasComponent %s already exists in the Canvas."\
% component_name)
else:
self.__components[component_name] = component
return self
def update_component(self, component):
"""Updates an existing CanvasComponent with values from component.
Components are uniquely indexed by their class names. The existing
CanvasComponent is obtained via the class name of component. Then,
the existing CanvasComponent object parameter dictionary is updated
with the values from the component parameter dictionary.
Parameters
----------
component : CanvasComponent object
Object containing the parameter values to update.
Returns
-------
self
"""
component_name = component.__class__.__name__
if component_name not in self.__components:
raise Exception("CanvasComponent object %s does not exist." \
% component_name)
else:
parameters = component.get_parameters()
self.__components[component_name].update(parameters)
return self
def delete_component(self, component_name=None):
"""Removes a CanvasComponent object from the container.
Parameters
----------
component_name : str. Optional
The class name for the CanvasComponent class to delete. If None,
all CanvasComponents will be deleted subject to verification.
Returns
-------
self
"""
if component_name is None:
delete_all = input("Are you sure you want to delete all \
CanvasComponent objects from this class? (y/n)")
if delete_all in ['Y', 'y', 'Yes', 'yes', 'YES']:
print('Deleting CanvasComponent objects.')
self.__components = dict()
else:
print('Leaving CanvasComponents in place.')
else:
try:
del self.__components[component_name]
except KeyError:
print("CanvasComponent object %s does not exist." \
% component_name)
return self
def __iter__(self):
return iter(self.__components)
# ----------------------------------------------------------------------- #
# RESET METHOD #
# ----------------------------------------------------------------------- #
def reset(self, component_name=None):
"""Resets CanvasComponent(s) to its/their default values."""
if component_name is None:
reset_all = input("Are you sure you want to reset all \
CanvasComponent objects to their default values? (y/n)")
if reset_all in ['Y', 'y', 'Yes', 'yes', 'YES']:
print('Resetting all CanvasComponent objects.')
for component in self.__components.values():
component.reset()
else:
print('Leaving CanvasComponents unchanged.')
else:
try:
self.__components[component_name].reset()
except KeyError:
print("CanvasComponent object %s does not exist." \
% component_name)
return self
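# Illustrative usage sketch (not part of the original module): a Canvas is assembled
# from components and then handed to a plot class, e.g.:
#
#     canvas = Canvas()
#     title = CanvasTitle()
#     title.title_text = 'Residuals vs. Fitted'
#     canvas.add_component(title)
#     canvas.print_components()
#
# update_component() later accepts a component of the same class whose parameter
# values overwrite the stored ones, and reset() returns components to their defaults.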
# --------------------------------------------------------------------------- #
# CanvasComponent #
# --------------------------------------------------------------------------- #
class CanvasComponent(ABC):
"""Abstract base class for Canvas component classes."""
def __init__(self):
self.__parameters = {}
@abstractmethod
def reset(self):
"""Resets configuration to default values."""
pass
def print_parameters(self):
"""Prints current parameters and values."""
classname = self.__class__.__name__
print('\nParameters for %s' % classname)
for k, v in self.__parameters.items():
message = ' ' + k + ' = ' + str(v)
print(message)
# --------------------------------------------------------------------------- #
# CanvasTitle #
# --------------------------------------------------------------------------- #
class CanvasTitle(CanvasComponent):
"""Configuration options for plot titles."""
DEFAULTS = {
'text' : '',
'font_family' : None,
'font_size' : None,
'font_color' : None,
'xref' : 'container',
'yref' : 'container',
'x' : 0.5,
'y' : 'auto',
'xanchor' : 'auto',
'yanchor' : 'auto',
'pad' : {'t':0, 'b': 0, 'l':0}
}
def __init__(self):
self.__parameters = {
'title_text' : '',
'title_font_family' : None,
'title_font_size' : None,
'title_font_color' : None,
'title_xref' : 'container',
'title_yref' : 'container',
'title_x' : 0.5,
'title_y' : 'auto',
'title_xanchor' : 'auto',
'title_yanchor' : 'auto',
'title_pad' : {'t':0, 'b': 0, 'l':0}
}
def reset(self):
self.__parameters = {
'title_text' : self.DEFAULTS['text'],
'title_font_family' : self.DEFAULTS['font_family'],
'title_font_size' : self.DEFAULTS['font_size'],
'title_font_color' : self.DEFAULTS['font_color'],
'title_xref' : self.DEFAULTS['xref'],
'title_yref' : self.DEFAULTS['yref'],
'title_x' : self.DEFAULTS['x'],
'title_y' : self.DEFAULTS['y'],
'title_xanchor' : self.DEFAULTS['xanchor'],
'title_yanchor' : self.DEFAULTS['yanchor'],
'title_pad' : self.DEFAULTS['pad']
}
# ----------------------------------------------------------------------- #
# TITLE TEXT PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def title_text(self):
"""Returns the title_text attribute."""
return self.__parameters['title_text']
@title_text.setter
def title_text(self, value):
"""Sets the title_text attribute."""
self.__parameters['title_text'] = value
# ----------------------------------------------------------------------- #
# TITLE FONT FAMILY PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def title_font_family(self):
"""Returns the title_font_family attribute."""
return self.__parameters['title_font_family']
@title_font_family.setter
def title_font_family(self, value):
"""Sets the title_font_family attribute."""
self.__parameters['title_font_family'] = value
# ----------------------------------------------------------------------- #
# TITLE FONT SIZE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def title_font_size(self):
"""Returns the title_font_size attribute."""
return self.__parameters['title_font_size']
@title_font_size.setter
def title_font_size(self, value):
"""Sets the title_font_size attribute."""
if value is None or value >= 1:
self.__parameters['title_font_size'] = value
else:
raise ValueError("Font size must be greater or equal to 1.")
# ----------------------------------------------------------------------- #
# TITLE FONT COLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def title_font_color(self):
"""Returns the title_font_color attribute."""
return self.__parameters['title_font_color']
@title_font_color.setter
def title_font_color(self, value):
"""Sets the title_font_color attribute."""
self.__parameters['title_font_color'] = value
# ----------------------------------------------------------------------- #
# TITLE XREF PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def title_xref(self):
"""Returns the title_xref attribute.
xref may have one of two values, 'container', which spans the entire
width of the plot and 'paper', which refers to the plotting area
only.
"""
return self.__parameters['title_xref']
@title_xref.setter
def title_xref(self, value):
"""Sets the title_xref attribute.
Parameters
----------
xref : str. Default 'container'
xref may have one of two values, 'container', which spans the entire
width of the plot and 'paper', which refers to the plotting area
only.
"""
valid_values = ['container', 'paper']
if value in valid_values:
self.__parameters['title_xref'] = value
else:
raise ValueError("xref must be equal to 'container', or 'paper'. ")
# ----------------------------------------------------------------------- #
# TITLE YREF PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def title_yref(self):
"""Returns the title_yref attribute.
yref may have one of two values, 'container', which spans the entire
width of the plot and 'paper', which refers to the plotting area
only.
"""
return self.__parameters['title_yref']
@title_yref.setter
def title_yref(self, value):
"""Sets the title_yref attribute.
Parameters
----------
value : str. Default = 'container'
yref may have one of two values, 'container', which spans the entire
height of the plot and 'paper', which refers to the plotting area
only.
"""
valid_values = ['container', 'paper']
if value in valid_values:
self.__parameters['title_yref'] = value
else:
raise ValueError("yref must be equal to 'container', or 'paper'. ")
# ----------------------------------------------------------------------- #
# TITLE X PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def title_x(self):
"""Returns the title_x attribute.
Specifies the x position with respect to 'xref' in normalized
coordinates from '0' (left) to '1' (right)
"""
return self.__parameters['title_x']
@title_x.setter
def title_x(self, value):
"""Sets the title_x attribute.
Parameters
----------
value : float, Default 0.5
Specifies the x position with respect to 'xref' in normalized
coordinates from '0' (left) to '1' (right)
"""
if value >= 0 and value <= 1:
self.__parameters['title_x'] = value
else:
raise ValueError("x must be between 0 and 1 inclusive.")
# ----------------------------------------------------------------------- #
# TITLE Y PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def title_y(self):
"""Returns the title_y attribute.
Specifies the y position with respect to 'yref' in normalized
coordinates from '0' (bottom) to '1' (top). "auto" places
the baseline of the title onto the vertical center of the
top margin.
"""
return self.__parameters['title_y']
@title_y.setter
def title_y(self, value):
"""Sets the title_y attribute.
Parameters
----------
value : float, Default = 'auto'
Specifies the y position with respect to 'yref' in normalized
coordinates from '0' (bottom) to '1' (top). "auto" places
the baseline of the title onto the vertical center of the
top margin.
"""
if isinstance(value, str):
if value == 'auto':
self.__parameters['title_y'] = value
else:
raise ValueError("title_y must be 'auto' or int between 0 and 1 inclusive.")
elif isinstance(value, (float, int)):
if value >= 0 and value <= 1:
self.__parameters['title_y'] = value
else:
raise ValueError("title_y must be 'auto' or int between 0 and 1 inclusive.")
else:
raise ValueError("title_y must be 'auto' or int between 0 and 1 inclusive.")
# ----------------------------------------------------------------------- #
# TITLE XANCHOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def title_xanchor(self):
"""Returns the title_xanchor attribute.
Sets the horizontal alignment of the title with respect to the x
position. "left" means that the title starts at x, "right" means
that the title ends at x and "center" means that the title's center
is at x. "auto" divides `xref` by three and calculates the `xanchor`
value automatically based on the value of `x`.
"""
return self.__parameters['title_xanchor']
@title_xanchor.setter
def title_xanchor(self, value):
"""Sets the title_xanchor attribute.
Parameters
----------
value : str, Default 'auto'. One of 'auto', 'left', 'center', 'right'
"left" means that the title starts at x, "right" means that the
title ends at x and "center" means that the title's center is at
x. "auto" divides `xref` by three and calculates the `xanchor`
value automatically based on the value of `x`.
"""
valid_values = ['auto', 'left', 'center', 'right']
if value in valid_values:
self.__parameters['title_xanchor'] = value
else:
raise ValueError("xanchor must be 'auto', 'left', 'center',\
or 'right'.")
# ----------------------------------------------------------------------- #
# TITLE YANCHOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def title_yanchor(self):
"""Returns the title_yanchor attribute.
Sets the vertical alignment of the title with respect to the y
position. "top" means that the title's cap line is at y, "bottom"
means that the title's baseline is at y and "middle" means that the
title's midline is at y. "auto" divides `yref` by three and
calculates the `yanchor` value automatically based on the value of `y`.
"""
return self.__parameters['title_yanchor']
@title_yanchor.setter
def title_yanchor(self, value):
"""Sets the title_yanchor attribute.
Parameters
----------
value : str, Default 'auto'. One of 'auto', 'top', 'middle', 'bottom'
"top" means that the title's cap line is at y, "bottom"
means that the title's baseline is at y and "middle" means
that the title's midline is at y. "auto" divides `yref` by
three and calculates the `yanchor` value automatically based
on the value of `y`.
"""
valid_values = ['auto', 'top', 'middle', 'bottom']
if value in valid_values:
self.__parameters['title_yanchor'] = value
else:
raise ValueError("yanchor must be 'auto', 'top', 'middle',\
or 'bottom'.")
# ----------------------------------------------------------------------- #
# TITLE PADDING PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def title_pad(self):
"""Returns the title_pad attribute.
Sets the padding of the title via three key/value pairs. The keys are
't' for top, 'b' for bottom, and 'l' for left. Each padding value
applies only when the corresponding `xanchor`/`yanchor` value is
set. For instance, for left padding to take effect, `xanchor` must
be set to "left". The same rule applies if `xanchor`/`yanchor` is
determined automatically. Padding is ignored if the respective
anchor value is "middle"/"center".
"""
return self.__parameters['title_pad']
@title_pad.setter
def title_pad(self, value):
"""Sets the title_pad attribute.
Parameters
----------
value : dict, Default {'t': 0, 'b' : 0, 'l' : 0}
Sets the padding of the title via three key/value pairs. The keys are
't' for top, 'b' for bottom, and 'l' for left. The values are
the amount of padding in pixels. Each padding value
applies only when the corresponding `xanchor`/`yanchor` value is
set. For instance, for left padding to take effect, `xanchor` must
be set to "left". The same rule applies if `xanchor`/`yanchor` is
determined automatically. Padding is ignored if the respective
anchor value is "middle"/"center".
"""
valid_keys = ['t', 'b', 'l']
if isinstance(value, dict):
if all(item in valid_keys for item in value.keys()):
if all(isinstance(v,int) for v in value.values()):
self.__parameters['title_pad'] = value
else:
raise TypeError("Pad values must be integers")
else:
raise KeyError("Pad keys must be 't', 'b', or 'l'.")
else:
raise TypeError("pad must be a dictionary.")
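# Example of configuring a CanvasTitle (values are illustrative):
#
#     title = CanvasTitle()
#     title.title_text = 'Model Fit'
#     title.title_x = 0.5                    # normalized position along xref
#     title.title_xanchor = 'center'
#     title.title_pad = {'t': 10, 'b': 0, 'l': 0}   # integer pixel padding, keys 't'/'b'/'l'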
# --------------------------------------------------------------------------- #
# CanvasLegend #
# --------------------------------------------------------------------------- #
class CanvasLegend(CanvasComponent):
"""Configuration options for plot legends."""
DEFAULTS = {
'show' : True,
'bgcolor' : None,
'bordercolor' : '#444',
'borderwidth' : 0,
'font_family' : None,
'font_size' : None,
'font_color' : None,
'orientation' : 'v',
'itemsizing' : 'trace',
'itemclick' : 'toggle',
'x' : 1.02,
'y' : 1,
'xanchor' : 'left',
'yanchor' : 'auto',
'valign' : 'middle'
}
def __init__(self):
self.__parameters = {}
self.__parameters['legend_show'] = True
self.__parameters['legend_bgcolor'] = None
self.__parameters['legend_bordercolor'] = '#444'
self.__parameters['legend_borderwidth'] = 0
self.__parameters['legend_font_family'] = None
self.__parameters['legend_font_size'] = None
self.__parameters['legend_font_color'] = None
self.__parameters['legend_orientation'] = 'v'
self.__parameters['legend_itemsizing'] = 'trace'
self.__parameters['legend_itemclick'] = 'toggle'
self.__parameters['legend_x'] = 1.02
self.__parameters['legend_y'] = 1
self.__parameters['legend_xanchor'] = 'left'
self.__parameters['legend_yanchor'] = 'auto'
self.__parameters['legend_valign'] = 'middle'
def reset(self):
self.__parameters = {}
self.__parameters['legend_show'] = self.DEFAULTS['show']
self.__parameters['legend_bgcolor'] = self.DEFAULTS['bgcolor']
self.__parameters['legend_bordercolor'] = self.DEFAULTS['bordercolor']
self.__parameters['legend_borderwidth'] = self.DEFAULTS['borderwidth']
self.__parameters['legend_font_family'] = self.DEFAULTS['font_family']
self.__parameters['legend_font_size'] = self.DEFAULTS['font_size']
self.__parameters['legend_font_color'] = self.DEFAULTS['font_color']
self.__parameters['legend_orientation'] = self.DEFAULTS['orientation']
self.__parameters['legend_itemsizing'] = self.DEFAULTS['itemsizing']
self.__parameters['legend_itemclick'] = self.DEFAULTS['itemclick']
self.__parameters['legend_x'] = self.DEFAULTS['x']
self.__parameters['legend_y'] = self.DEFAULTS['y']
self.__parameters['legend_xanchor'] = self.DEFAULTS['xanchor']
self.__parameters['legend_yanchor'] = self.DEFAULTS['yanchor']
self.__parameters['legend_valign'] = self.DEFAULTS['valign']
# ----------------------------------------------------------------------- #
# LEGEND SHOW PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_show(self):
"""Returns the legend_show attribute."""
return self.__parameters['legend_show']
@legend_show.setter
def legend_show(self, value):
"""Sets the legend_show attribute."""
if isinstance(value, bool):
self.__parameters['legend_show'] = value
else:
raise TypeError("legend_show must be True or False")
# ----------------------------------------------------------------------- #
# LEGEND BGCOLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_bgcolor(self):
"""Returns the legend_bgcolor attribute."""
return self.__parameters['legend_bgcolor']
@legend_bgcolor.setter
def legend_bgcolor(self, value):
"""Sets the legend_bgcolor attribute."""
if isinstance(value, str) or value is None:
self.__parameters['legend_bgcolor'] = value
else:
raise TypeError("legend_bgcolor must be string")
# ----------------------------------------------------------------------- #
# LEGEND BORDER COLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_bordercolor(self):
"""Returns the legend_bordercolor attribute."""
return self.__parameters['legend_bordercolor']
@legend_bordercolor.setter
def legend_bordercolor(self, value):
"""Sets the legend_bordercolor attribute."""
self.__parameters['legend_bordercolor'] = value
# ----------------------------------------------------------------------- #
# LEGEND BORDER WIDTH PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_borderwidth(self):
"""Returns the legend_borderwidth attribute."""
return self.__parameters['legend_borderwidth']
@legend_borderwidth.setter
def legend_borderwidth(self, value):
"""Sets the legend_borderwidth attribute."""
if isinstance(value, int) and value >= 0:
self.__parameters['legend_borderwidth'] = value
elif not isinstance(value, int):
raise TypeError("value must be an integer >= 0")
else:
raise ValueError("value must be an integer >= 0")
# ----------------------------------------------------------------------- #
# LEGEND FONT FAMILY PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_font_family(self):
"""Returns the legend_font_family attribute."""
return self.__parameters['legend_font_family']
@legend_font_family.setter
def legend_font_family(self, value):
"""Sets the legend_font_family attribute."""
self.__parameters['legend_font_family'] = value
# ----------------------------------------------------------------------- #
# LEGEND FONT SIZE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_font_size(self):
"""Returns the legend_font_size attribute."""
return self.__parameters['legend_font_size']
@legend_font_size.setter
def legend_font_size(self, value):
"""Sets the legend_font_size attribute."""
if isinstance(value, int) and value >= 1:
self.__parameters['legend_font_size'] = value
elif not isinstance(value, int):
raise TypeError("value must be an integer >= 1")
else:
raise ValueError("value must be an integer >= 1")
# ----------------------------------------------------------------------- #
# LEGEND FONT COLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_font_color(self):
"""Returns the legend_font_color attribute."""
return self.__parameters['legend_font_color']
@legend_font_color.setter
def legend_font_color(self, value):
"""Sets the legend_font_color attribute."""
self.__parameters['legend_font_color'] = value
# ----------------------------------------------------------------------- #
# LEGEND ORIENTATION PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_orientation(self):
"""Returns the legend_orientation attribute."""
return self.__parameters['legend_orientation']
@legend_orientation.setter
def legend_orientation(self, value):
"""Sets the legend_orientation attribute."""
valid_values = ['v', 'h']
if value in valid_values:
self.__parameters['legend_orientation'] = value
else:
raise ValueError("legend_orientation must be 'v', or 'h'.")
# ----------------------------------------------------------------------- #
# LEGEND ITEMSIZING PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_itemsizing(self):
"""Returns the legend_itemsizing attribute."""
return self.__parameters['legend_itemsizing']
@legend_itemsizing.setter
def legend_itemsizing(self, value):
"""Sets the legend_itemsizing attribute."""
valid_values = ['trace', 'constant']
if value in valid_values:
self.__parameters['legend_itemsizing'] = value
else:
raise ValueError("legend_itemsizing must be 'trace' or 'constant'")
# ----------------------------------------------------------------------- #
# LEGEND ITEMCLICK PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_itemclick(self):
"""Returns the legend_itemclick attribute."""
return self.__parameters['legend_itemclick']
@legend_itemclick.setter
def legend_itemclick(self, value):
"""Sets the legend_itemclick attribute."""
valid_values = ['toggle', 'toggleothers', False]
if value in valid_values:
self.__parameters['legend_itemclick'] = value
else:
raise ValueError("legend_itemclick must be 'toggle', \
'toggleothers' or False")
# ----------------------------------------------------------------------- #
# LEGEND X PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_x(self):
"""Returns the legend_x attribute.
Specifies the x position with respect to 'xref' in normalized
coordinates from '0' (left) to '1' (right)
"""
return self.__parameters['legend_x']
@legend_x.setter
def legend_x(self, value):
"""Sets the legend_x attribute.
Parameters
----------
value : float, Default 1.02
Sets the x position (in normalized coordinates) of the legend.
Defaults to "1.02" for vertical legends and defaults to
"0" for horizontal legends.
"""
if isinstance(value,(int, float)) and value >= -2 and value <= 3:
self.__parameters['legend_x'] = value
elif not isinstance(value, (int, float)):
raise TypeError("legend_x must be a number between -2 and 3 inclusive.")
else:
raise ValueError("legend_x must be a number between -2 and 3 inclusive.")
# ----------------------------------------------------------------------- #
# LEGEND Y PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_y(self):
"""Returns the legend_y attribute.
Specifies the y position with respect to 'yref' in normalized
coordinates from '0' (bottom) to '1' (top).
"""
return self.__parameters['legend_y']
@legend_y.setter
def legend_y(self, value):
"""Sets the legend_y attribute.
Parameters
----------
value : float, Default = 1
Sets the y position (in normalized coordinates) of the legend.
Defaults to "1" for vertical legends, defaults to "-0.1"
for horizontal legends on graphs w/o range sliders and
defaults to "1.1" for horizontal legends on graph with
one or multiple range sliders.
"""
if isinstance(value,(int, float)) and value >= -2 and value <= 3:
self.__parameters['legend_y'] = value
elif not isinstance(value, (int, float)):
raise TypeError("legend_y must be a number between -2 and 3 inclusive.")
else:
raise ValueError("legend_y must be a number between -2 and 3 inclusive.")
# ----------------------------------------------------------------------- #
# LEGEND XANCHOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_xanchor(self):
"""Returns the legend_xanchor attribute.
Sets the legend's horizontal position anchor. This anchor binds the
`x` position to the "left", "center" or "right" of the legend.
Value "auto" anchors legends to the right for `x` values greater
than or equal to 2/3, anchors legends to the left for `x` values
less than or equal to 1/3 and anchors legends with respect to their
center otherwise.
"""
return self.__parameters['legend_xanchor']
@legend_xanchor.setter
def legend_xanchor(self, value):
"""Sets the legend_xanchor attribute.
Parameters
----------
value : str, Default 'left'. One of 'auto', 'left', 'center', 'right'
Sets the legend's horizontal position anchor. This anchor binds the
`x` position to the "left", "center" or "right" of the legend.
Value "auto" anchors legends to the right for `x` values greater
than or equal to 2/3, anchors legends to the left for `x` values
less than or equal to 1/3 and anchors legends with respect to
their center otherwise.
"""
valid_values = ['auto', 'left', 'center', 'right']
if value in valid_values:
self.__parameters['legend_xanchor'] = value
else:
raise ValueError("xanchor must be 'auto', 'left', 'center',\
or 'right'.")
# ----------------------------------------------------------------------- #
# LEGEND YANCHOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_yanchor(self):
"""Returns the legend_yanchor attribute.
Sets the horizontal alignment of the legend with respect to the x
position. "left" means that the legend starts at x, "right" means
that the legend ends at x and "center" means that the legend's center
is at x. "auto" divides `xref` by three and calculates the `yanchor`
value automatically based on the value of `x`.
"""
return self.__parameters['legend_yanchor']
@legend_yanchor.setter
def legend_yanchor(self, value):
"""Sets the legend_yanchor attribute.
Parameters
----------
value : str, Default 'auto'. One of 'auto', 'top', 'middle', 'bottom'
            Sets the legend's vertical position anchor. This anchor binds the
            `y` position to the "top", "middle" or "bottom" of the legend.
            Value "auto" anchors legends at their bottom for `y` values
            less than or equal to 1/3, anchors legends at their top
            for `y` values greater than or equal to 2/3 and anchors legends
            with respect to their middle otherwise.
"""
valid_values = ['auto', 'top', 'middle', 'bottom']
if value in valid_values:
self.__parameters['legend_yanchor'] = value
else:
raise ValueError("yanchor must be 'auto', 'top', 'middle',\
or 'bottom'.")
# ----------------------------------------------------------------------- #
# LEGEND VALIGN PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def legend_valign(self):
"""Returns the legend_valign attribute.
Sets the vertical alignment of the symbols with respect to their
associated text.
"""
return self.__parameters['legend_valign']
@legend_valign.setter
def legend_valign(self, value):
"""Sets the legend_valign attribute.
Parameters
----------
        value : str, Default 'middle'. One of 'top', 'middle', 'bottom'
Sets the vertical alignment of the symbols with respect to their
associated text.
"""
valid_values = ['top', 'middle', 'bottom']
if value in valid_values:
self.__parameters['legend_valign'] = value
else:
raise ValueError("valign must be 'top', 'middle',\
or 'bottom'.")
# --------------------------------------------------------------------------- #
# CanvasMargins #
# --------------------------------------------------------------------------- #
class CanvasMargins(CanvasComponent):
"""Configuration options for plot margins."""
DEFAULTS = {
'left' : 80,
'top' : 100,
'bottom' : 80,
'pad' : 0
}
def __init__(self):
self.__parameters = {}
self.__parameters['margins_left'] = 80
self.__parameters['margins_top'] = 100
self.__parameters['margins_bottom'] = 80
self.__parameters['margins_pad'] = 0
def reset(self):
self.__parameters = {}
self.__parameters['margins_left'] = self.DEFAULTS['left']
self.__parameters['margins_top'] = self.DEFAULTS['top']
self.__parameters['margins_bottom'] = self.DEFAULTS['bottom']
self.__parameters['margins_pad'] = self.DEFAULTS['pad']
# ----------------------------------------------------------------------- #
# MARGINS_LEFT PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def margins_left(self):
"""Returns the margins_left attribute."""
return self.__parameters['margins_left']
@margins_left.setter
def margins_left(self, value):
"""Sets the margins_left attribute."""
if isinstance(value,int) and value >= 0:
self.__parameters['margins_left'] = value
else:
raise ValueError("value must be an integer >= 0")
# ----------------------------------------------------------------------- #
# MARGINS_TOP PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def margins_top(self):
"""Returns the margins_top attribute."""
return self.__parameters['margins_top']
@margins_top.setter
def margins_top(self, value):
"""Sets the margins_top attribute."""
if isinstance(value,int) and value >= 0:
self.__parameters['margins_top'] = value
else:
raise ValueError("value must be an integer >= 0")
# ----------------------------------------------------------------------- #
# MARGINS_BOTTOM PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def margins_bottom(self):
"""Returns the margins_bottom attribute."""
return self.__parameters['margins_bottom']
@margins_bottom.setter
def margins_bottom(self, value):
"""Sets the margins_bottom attribute."""
if isinstance(value,int) and value >= 0:
self.__parameters['margins_bottom'] = value
else:
raise ValueError("value must be an integer >= 0")
# ----------------------------------------------------------------------- #
# MARGINS_PAD PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def margins_pad(self):
"""Returns the margins_pad attribute."""
return self.__parameters['margins_pad']
@margins_pad.setter
def margins_pad(self, value):
"""Sets the margins_pad attribute."""
if isinstance(value,int) and value >= 0:
self.__parameters['margins_pad'] = value
else:
raise ValueError("value must be an integer >= 0")
# --------------------------------------------------------------------------- #
# CanvasSize #
# --------------------------------------------------------------------------- #
class CanvasSize(CanvasComponent):
"""Configuration options for plot size."""
DEFAULTS = {
'autosize' : True,
'width' : 700,
'height' : 450
}
def __init__(self):
self.__parameters = {}
self.__parameters['size_autosize'] = True
self.__parameters['size_width'] = 700
self.__parameters['size_height'] = 450
def reset(self):
self.__parameters = {}
self.__parameters['size_autosize'] = self.DEFAULTS['autosize']
self.__parameters['size_width'] = self.DEFAULTS['width']
self.__parameters['size_height'] = self.DEFAULTS['height']
# ----------------------------------------------------------------------- #
# AUTOSIZE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def size_autosize(self):
"""Returns the size_autosize attribute.
Determines whether or not a layout width or height that has been left
undefined by the user is initialized on each relayout.
"""
return self.__parameters['size_autosize']
@size_autosize.setter
def size_autosize(self, value):
"""Sets the size_autosize attribute.
Parameters
----------
value : bool
Determines whether or not a layout width or height that has
been left undefined by the user is initialized on each relayout.
"""
if isinstance(value, bool):
self.__parameters['size_autosize'] = value
else:
raise ValueError("size_autosize must be True or False.")
# ----------------------------------------------------------------------- #
# WIDTH PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def size_width(self):
"""Returns the size_width attribute."""
return self.__parameters['size_width']
@size_width.setter
def size_width(self, value):
"""Sets the size_width attribute."""
if isinstance(value, (int,float)) and value >= 10:
self.__parameters['size_width'] = value
else:
raise ValueError("Width must be a number greater or equal to 10.")
# ----------------------------------------------------------------------- #
# HEIGHT PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def size_height(self):
"""Returns the size_height attribute."""
return self.__parameters['size_height']
@size_height.setter
def size_height(self, value):
"""Sets the size_height attribute."""
if isinstance(value, (int, float)) and value >= 10:
self.__parameters['size_height'] = value
else:
raise ValueError("height must be a number greater or equal to 10.")
# --------------------------------------------------------------------------- #
# CanvasFont #
# --------------------------------------------------------------------------- #
class CanvasFont(CanvasComponent):
"""Configuration options for plot font."""
DEFAULTS = {
'family' : None,
'size' : 12,
'color' : '#444',
'separators' : '.,'
}
def __init__(self):
self.__parameters = {}
self.__parameters['font_family'] = None
self.__parameters['font_size'] = 12
self.__parameters['font_color'] = '#444'
self.__parameters['font_separators'] = '.,'
def reset(self):
self.__parameters = {}
self.__parameters['font_family'] = self.DEFAULTS['family']
self.__parameters['font_size'] = self.DEFAULTS['size']
self.__parameters['font_color'] = self.DEFAULTS['color']
self.__parameters['font_separators'] = self.DEFAULTS['separators']
# ----------------------------------------------------------------------- #
# FONT FAMILY PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def font_family(self):
"""Returns the font_family attribute."""
return self.__parameters['font_family']
@font_family.setter
def font_family(self, value):
"""Sets the font_family attribute."""
self.__parameters['font_family'] = value
# ----------------------------------------------------------------------- #
# FONT SIZE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def font_size(self):
"""Returns the font_size attribute."""
return self.__parameters['font_size']
@font_size.setter
def font_size(self, value):
"""Sets the font_size attribute."""
if isinstance(value, int) and value >= 1:
self.__parameters['font_size'] = value
else:
raise ValueError("value must be a number >= 1")
# ----------------------------------------------------------------------- #
# FONT COLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def font_color(self):
"""Returns the font_color attribute."""
return self.__parameters['font_color']
@font_color.setter
def font_color(self, value):
"""Sets the font_color attribute."""
self.__parameters['font_color'] = value
# ----------------------------------------------------------------------- #
# SEPARATORS PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def font_separators(self):
"""Returns the font_separators attribute.
        The parameter name is somewhat misleading: it actually controls
        number formatting. It sets the decimal and thousands separators.
        For example, ".," puts a '.' before decimals and a ','
        between thousands.
"""
return self.__parameters['font_separators']
@font_separators.setter
def font_separators(self, value):
"""Sets the font_separators attribute.
Parameters
----------
value : str, Default '.,'
            The parameter name is somewhat misleading: it actually controls
            number formatting. It sets the decimal and thousands separators.
            For example, ".," puts a '.' before decimals and a ','
            between thousands.
"""
self.__parameters['font_separators'] = value
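# A minimal usage sketch for CanvasFont (hypothetical helper, never called).
# Note that font_separators controls number formatting: the first character
# is the decimal separator, the second the thousands separator.
def _example_canvas_font():
    font = CanvasFont()
    font.font_family = "Open Sans"   # any font family string is accepted
    font.font_size = 14              # integers >= 1 only
    font.font_color = "#2a3f5f"
    font.font_separators = ",."      # ',' for decimals, '.' for thousands
    return font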
# --------------------------------------------------------------------------- #
# CanvasColorBackground #
# --------------------------------------------------------------------------- #
class CanvasColorBackground(CanvasComponent):
"""Configures background colors for paper and plot."""
DEFAULTS = {
'paper_bgcolor' : '#fff',
'plot_bgcolor' : '#fff'
}
def __init__(self):
self.__parameters = {}
self.__parameters['paper_bgcolor'] = '#fff'
self.__parameters['plot_bgcolor'] = '#fff'
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['paper_bgcolor'] = self.DEFAULTS["paper_bgcolor"]
self.__parameters['plot_bgcolor'] = self.DEFAULTS["plot_bgcolor"]
# ----------------------------------------------------------------------- #
# PAPER BGCOLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def paper_bgcolor(self):
"""Returns the paper_bgcolor attribute.
Sets the color of paper where the graph is drawn.
"""
return self.__parameters['paper_bgcolor']
@paper_bgcolor.setter
def paper_bgcolor(self, value):
"""Sets the paper_bgcolor attribute.
Parameters
----------
value : str, Default = '#fff'
Sets the color of paper where the graph is drawn.
"""
self.__parameters['paper_bgcolor'] = value
# ----------------------------------------------------------------------- #
# PLOT BGCOLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def plot_bgcolor(self):
"""Returns the plot_bgcolor attribute.
Sets the color of plot where the graph is drawn.
"""
return self.__parameters['plot_bgcolor']
@plot_bgcolor.setter
def plot_bgcolor(self, value):
"""Sets the plot_bgcolor attribute.
Parameters
----------
value : str, Default = '#fff'
Sets the color of plot where the graph is drawn.
"""
self.__parameters['plot_bgcolor'] = value
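# A minimal usage sketch for CanvasColorBackground (hypothetical helper,
# never called). paper_bgcolor fills the area around the plot, plot_bgcolor
# fills the plotting area itself.
def _example_canvas_background():
    background = CanvasColorBackground()
    background.paper_bgcolor = '#f8f8f8'
    background.plot_bgcolor = '#ffffff'
    return background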
# --------------------------------------------------------------------------- #
# CanvasColorScale #
# --------------------------------------------------------------------------- #
class CanvasColorScale(CanvasComponent):
"""Configuration options for plot colors."""
DEFAULTS = {
'colorscale_sequential' : [[0, 'rgb(220,220,220)'],
[0.2, 'rgb(245,195,157)'],
[0.4, 'rgb(245,160,105)'],
[1, 'rgb(178,10,28)'], ],
'colorscale_sequentialminus' : [[0, 'rgb(5,10,172)'],
[0.35, 'rgb(40,60,190)'],
[0.5, 'rgb(70,100,245)'],
[0.6, 'rgb(90,120,245)'],
[0.7, 'rgb(106,137,247)'],
[1, 'rgb(220,220,220)'], ],
'colorscale_diverging' : [[0, 'rgb(5,10,172)'],
[0.35, 'rgb(106,137,247)'],
[0.5, 'rgb(190,190,190)'],
[0.6, 'rgb(220,170,132)'],
[0.7, 'rgb(230,145,90)'],
[1, 'rgb(178,10,28)'], ],
'colorway' : ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
'#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
}
def __init__(self):
self.__parameters = {}
self.__parameters['colorscale_sequential'] = [[0, 'rgb(220,220,220)'], [0.2, 'rgb(245,195,157)'],
[0.4, 'rgb(245,160,105)'], [1, 'rgb(178,10,28)'], ]
self.__parameters['colorscale_sequentialminus'] = [[0, 'rgb(5,10,172)'],
[0.35, 'rgb(40,60,190)'],
[0.5, 'rgb(70,100,245)'],
[0.6, 'rgb(90,120,245)'],
[0.7, 'rgb(106,137,247)'],
[1, 'rgb(220,220,220)'], ]
self.__parameters['colorscale_diverging'] = [[0, 'rgb(5,10,172)'], [0.35, 'rgb(106,137,247)'],
[0.5, 'rgb(190,190,190)'], [0.6, 'rgb(220,170,132)'],
[0.7, 'rgb(230,145,90)'], [1, 'rgb(178,10,28)'], ]
self.__parameters['colorway'] = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
'#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['colorscale_sequential'] = self.DEFAULTS["colorscale_sequential"]
self.__parameters['colorscale_sequentialminus'] = self.DEFAULTS["colorscale_sequentialminus"]
self.__parameters['colorscale_diverging'] = self.DEFAULTS["colorscale_diverging"]
self.__parameters['colorway'] = self.DEFAULTS["colorway"]
# ----------------------------------------------------------------------- #
# COLORSCALE SEQUENTIAL PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def colorscale_sequential(self):
"""Returns the colorscale_sequential attribute.
Sets the default sequential colorscale for positive values.
"""
return self.__parameters['colorscale_sequential']
@colorscale_sequential.setter
def colorscale_sequential(self, value):
"""Sets the colorscale_sequential attribute.
Parameters
----------
value : list Default = [[0, rgb(220,220,220)], [0.2, rgb(245,195,157)],
[0.4, rgb(245,160,105)], [1, rgb(178,10,28)], ]
Sets the default sequential colorscale for positive values.
"""
self.__parameters['colorscale_sequential'] = value
# ----------------------------------------------------------------------- #
# COLORSCALE SEQUENTIALMINUS PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def colorscale_sequentialminus(self):
"""Returns the colorscale_sequentialminus attribute.
Sets the default sequential colorscale for negative values.
"""
return self.__parameters['colorscale_sequentialminus']
@colorscale_sequentialminus.setter
def colorscale_sequentialminus(self, value):
"""Sets the colorscale_sequentialminus attribute.
Parameters
----------
value : list Default = [[0, rgb(5,10,172)],
[0.35, rgb(40,60,190)],
[0.5, rgb(70,100,245)],
[0.6, rgb(90,120,245)],
[0.7, rgb(106,137,247)],
[1, rgb(220,220,220)], ]
Sets the default sequential colorscale for negative values.
"""
self.__parameters['colorscale_sequentialminus'] = value
# ----------------------------------------------------------------------- #
# COLORSCALE DIVERGING PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def colorscale_diverging(self):
"""Returns the colorscale_diverging attribute.
Sets the default diverging colorscale.
"""
return self.__parameters['colorscale_diverging']
@colorscale_diverging.setter
def colorscale_diverging(self, value):
"""Sets the colorscale_diverging attribute.
Parameters
----------
value : list, Default = [[0, rgb(5,10,172)], [0.35, rgb(106,137,247)],
[0.5, rgb(190,190,190)], [0.6, rgb(220,170,132)],
[0.7, rgb(230,145,90)], [1, rgb(178,10,28)], ]
Sets the default diverging colorscale.
"""
self.__parameters['colorscale_diverging'] = value
# ----------------------------------------------------------------------- #
# COLORWAY PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def colorway(self):
"""Returns the colorway attribute.
Sets the default trace colors.
"""
return self.__parameters['colorway']
@colorway.setter
def colorway(self, value):
"""Sets the colorway attribute.
Parameters
----------
value : list. Default = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
Sets the default trace colors.
"""
self.__parameters['colorway'] = value
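# A minimal usage sketch for CanvasColorScale (hypothetical helper, never
# called). Colorscales are lists of [position, color] pairs with positions
# running from 0 to 1; colorway is a flat list of trace colors.
def _example_canvas_colorscale():
    scales = CanvasColorScale()
    scales.colorscale_diverging = [[0, 'rgb(5,10,172)'],
                                   [0.5, 'rgb(255,255,255)'],
                                   [1, 'rgb(178,10,28)']]
    scales.colorway = ['#636efa', '#ef553b', '#00cc96']
    return scales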
# --------------------------------------------------------------------------- #
# CanvasColorAxisDomain #
# --------------------------------------------------------------------------- #
class CanvasColorAxisDomain(CanvasComponent):
"""Configuration options for plot colors."""
DEFAULTS = {
'coloraxis_cauto' : True,
'coloraxis_cmin' : None,
'coloraxis_cmax' : None,
'coloraxis_cmid' : None
}
def __init__(self):
self.__parameters = {}
self.__parameters['coloraxis_cauto'] = True
self.__parameters['coloraxis_cmin'] = None
self.__parameters['coloraxis_cmax'] = None
self.__parameters['coloraxis_cmid'] = None
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['coloraxis_cauto'] = self.DEFAULTS["coloraxis_cauto"]
self.__parameters['coloraxis_cmin'] = self.DEFAULTS["coloraxis_cmin"]
self.__parameters['coloraxis_cmax'] = self.DEFAULTS["coloraxis_cmax"]
self.__parameters['coloraxis_cmid'] = self.DEFAULTS["coloraxis_cmid"]
# ----------------------------------------------------------------------- #
# COLORAXIS CAUTO PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_cauto(self):
"""Returns the coloraxis_cauto attribute.
Determines whether or not the color domain is computed with respect
to the input data (here corresponding trace color array(s)) or the
        bounds set in `cmin` and `cmax`. Defaults to `False` when `cmin`
        and `cmax` are set by the user.
"""
return self.__parameters['coloraxis_cauto']
@coloraxis_cauto.setter
def coloraxis_cauto(self, value):
"""Sets the coloraxis_cauto attribute.
Parameters
----------
value : bool Default = True
Determines whether or not the color domain is computed with respect
to the input data (here corresponding trace color array(s)) or the
            bounds set in `cmin` and `cmax`. Defaults to `False` when `cmin`
            and `cmax` are set by the user.
"""
if isinstance(value, bool):
self.__parameters['coloraxis_cauto'] = value
else:
raise TypeError("value must be boolean True or False.")
# ----------------------------------------------------------------------- #
# COLORAXIS CMIN PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_cmin(self):
"""Returns the coloraxis_cmin attribute.
Sets the lower bound of the color domain. Value should have the same
units as corresponding trace color array(s) and if set, `cmax` must
be set as well.
"""
return self.__parameters['coloraxis_cmin']
@coloraxis_cmin.setter
def coloraxis_cmin(self, value):
"""Sets the coloraxis_cmin attribute.
Parameters
----------
        value : int or float
Sets the lower bound of the color domain. Value should have the same
units as corresponding trace color array(s) and if set, `cmax` must
be set as well.
"""
if isinstance(value, (float, int)):
self.__parameters['coloraxis_cmin'] = value
else:
raise TypeError("value must be a number")
# ----------------------------------------------------------------------- #
# COLORAXIS CMAX PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_cmax(self):
"""Returns the coloraxis_cmax attribute.
Sets the upper bound of the color domain. Value should have the same
units as corresponding trace color array(s) and if set, `cmin`
must be set as well.
"""
return self.__parameters['coloraxis_cmax']
@coloraxis_cmax.setter
def coloraxis_cmax(self, value):
"""Sets the coloraxis_cmax attribute.
Parameters
----------
        value : int or float
Sets the upper bound of the color domain. Value should have the same
units as corresponding trace color array(s) and if set, `cmin`
must be set as well.
"""
if isinstance(value, (float, int)):
self.__parameters['coloraxis_cmax'] = value
else:
raise TypeError("value must be a number")
# ----------------------------------------------------------------------- #
# COLORAXIS CMID PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_cmid(self):
"""Returns the coloraxis_cmid attribute.
Sets the mid-point of the color domain by scaling `cmin` and/or
`cmax` to be equidistant to this point. Value should have the
same units as corresponding trace color array(s). Has no effect
when `cauto` is `False`.
"""
return self.__parameters['coloraxis_cmid']
@coloraxis_cmid.setter
def coloraxis_cmid(self, value):
"""Sets the coloraxis_cmid attribute.
Parameters
----------
        value : int or float
Sets the mid-point of the color domain by scaling `cmin` and/or
`cmax` to be equidistant to this point. Value should have the
same units as corresponding trace color array(s). Has no effect
when `cauto` is `False`.
"""
if isinstance(value, (float, int)):
self.__parameters['coloraxis_cmid'] = value
else:
raise TypeError("value must be a number")
# --------------------------------------------------------------------------- #
# CanvasColorAxisScales #
# --------------------------------------------------------------------------- #
class CanvasColorAxisScales(CanvasComponent):
"""Configuration options for plot colors."""
DEFAULTS = {
'coloraxis_colorscale' : [[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']],
'coloraxis_autoscale' : True,
'coloraxis_reversescale' : True,
'coloraxis_showscale' : True
}
def __init__(self):
self.__parameters = {}
self.__parameters['coloraxis_colorscale'] = [[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]
self.__parameters['coloraxis_autoscale'] = True
self.__parameters['coloraxis_reversescale'] = True
self.__parameters['coloraxis_showscale'] = True
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['coloraxis_colorscale'] = self.DEFAULTS["coloraxis_colorscale"]
self.__parameters['coloraxis_autoscale'] = self.DEFAULTS["coloraxis_autoscale"]
self.__parameters['coloraxis_reversescale'] = self.DEFAULTS["coloraxis_reversescale"]
self.__parameters['coloraxis_showscale'] = self.DEFAULTS["coloraxis_showscale"]
# ----------------------------------------------------------------------- #
# COLORAXIS COLORSCALE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorscale(self):
"""Returns the coloraxis_colorscale attribute.
Sets the colorscale. See `Plotly Colorscale
<https://plot.ly/python/reference/#layout-coloraxis-colorscale>`_
"""
return self.__parameters['coloraxis_colorscale']
@coloraxis_colorscale.setter
def coloraxis_colorscale(self, value):
"""Sets the coloraxis_colorscale attribute.
Parameters
----------
value : list. Default = [[0, rgb(0,0,255)], [1, rgb(255,0,0)]]
Sets the colorscale. See `Plotly Colorscale
<https://plot.ly/python/reference/#layout-coloraxis-colorscale>`_
"""
if isinstance(value, list):
self.__parameters['coloraxis_colorscale'] = value
else:
raise TypeError("value must be a list.")
# ----------------------------------------------------------------------- #
# COLORAXIS AUTOSCALE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_autoscale(self):
"""Returns the coloraxis_autoscale attribute.
Determines whether the colorscale is a default palette
(`autocolorscale: True`) or the palette determined by `colorscale`.
In case `colorscale` is unspecified or `autocolorscale` is True,
the default palette will be chosen according to whether numbers
in the `color` array are all positive, all negative or mixed.
"""
return self.__parameters['coloraxis_autoscale']
@coloraxis_autoscale.setter
def coloraxis_autoscale(self, value):
"""Sets the coloraxis_autoscale attribute.
Parameters
----------
value : bool. Default = True
Determines whether the colorscale is a default palette
(`autocolorscale: True`) or the palette determined by `colorscale`.
In case `colorscale` is unspecified or `autocolorscale` is True,
the default palette will be chosen according to whether numbers
in the `color` array are all positive, all negative or mixed.
"""
if isinstance(value, bool):
self.__parameters['coloraxis_autoscale'] = value
else:
raise TypeError("value must be boolean True or False.")
# ----------------------------------------------------------------------- #
# COLORAXIS REVERSESCALE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_reversescale(self):
"""Returns the coloraxis_reversescale attribute.
Reverses the color mapping if True. If True, `cmin` will correspond
to the last color in the array and `cmax` will correspond to the first
color.
"""
return self.__parameters['coloraxis_reversescale']
@coloraxis_reversescale.setter
def coloraxis_reversescale(self, value):
"""Sets the coloraxis_reversescale attribute.
Parameters
----------
value : bool. Default = True
Reverses the color mapping if True. If True, `cmin` will correspond
to the last color in the array and `cmax` will correspond to the first
color.
"""
if isinstance(value, bool):
self.__parameters['coloraxis_reversescale'] = value
else:
raise TypeError("value must be boolean True or False.")
# ----------------------------------------------------------------------- #
# COLORAXIS SHOWSCALE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_showscale(self):
"""Returns the coloraxis_showscale attribute.
Determines whether or not a colorbar is displayed for this trace.
"""
return self.__parameters['coloraxis_showscale']
@coloraxis_showscale.setter
def coloraxis_showscale(self, value):
"""Sets the coloraxis_showscale attribute.
Parameters
----------
value : bool. Default = True
Determines whether or not a colorbar is displayed for this trace.
"""
if isinstance(value, bool):
self.__parameters['coloraxis_showscale'] = value
else:
raise TypeError("value must be boolean True or False.")
# --------------------------------------------------------------------------- #
# CanvasColorAxisBarStyle #
# --------------------------------------------------------------------------- #
class CanvasColorAxisBarStyle(CanvasComponent):
"""Configuration options for plot colors."""
DEFAULTS = {
'coloraxis_colorbar_thicknessmode' : 'pixels',
'coloraxis_colorbar_thickness' : 30,
'coloraxis_colorbar_lenmode' : 'fraction',
'coloraxis_colorbar_len' : 1,
'coloraxis_colorbar_bgcolor' : "rgba(0,0,0,0)"
}
def __init__(self):
self.__parameters = {}
self.__parameters['coloraxis_colorbar_thicknessmode'] = 'pixels'
self.__parameters['coloraxis_colorbar_thickness'] = 30
self.__parameters['coloraxis_colorbar_lenmode'] = 'fraction'
self.__parameters['coloraxis_colorbar_len'] = 1
self.__parameters['coloraxis_colorbar_bgcolor'] = "rgba(0,0,0,0)"
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['coloraxis_colorbar_thicknessmode'] = self.DEFAULTS["coloraxis_colorbar_thicknessmode"]
self.__parameters['coloraxis_colorbar_thickness'] = self.DEFAULTS["coloraxis_colorbar_thickness"]
self.__parameters['coloraxis_colorbar_lenmode'] = self.DEFAULTS["coloraxis_colorbar_lenmode"]
self.__parameters['coloraxis_colorbar_len'] = self.DEFAULTS["coloraxis_colorbar_len"]
self.__parameters['coloraxis_colorbar_bgcolor'] = self.DEFAULTS["coloraxis_colorbar_bgcolor"]
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR THICKNESSMODE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_thicknessmode(self):
"""Returns the coloraxis_colorbar_thicknessmode attribute.
Determines whether this color bar's thickness (i.e. the measure in
the constant color direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the value.
"""
return self.__parameters['coloraxis_colorbar_thicknessmode']
@coloraxis_colorbar_thicknessmode.setter
def coloraxis_colorbar_thicknessmode(self, value):
"""Sets the coloraxis_colorbar_thicknessmode attribute.
Parameters
----------
value : str. One of 'fraction' or 'pixels'. Default = 'pixels'.
Determines whether this color bar's thickness (i.e. the measure in
the constant color direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the value.
"""
valid_values = ['fraction', 'pixels']
if value in valid_values:
self.__parameters['coloraxis_colorbar_thicknessmode'] = value
else:
raise ValueError("colorbar_thicknessmode must be either 'fraction'\
or 'pixels'.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR THICKNESS PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_thickness(self):
"""Returns the coloraxis_colorbar_thickness attribute.
        Sets the thickness of the color bar. This measure excludes the
size of the padding, ticks and labels.
"""
return self.__parameters['coloraxis_colorbar_thickness']
@coloraxis_colorbar_thickness.setter
def coloraxis_colorbar_thickness(self, value):
"""Sets the coloraxis_colorbar_thickness attribute.
Parameters
----------
        value : int. Default = 30
            Sets the thickness of the color bar. This measure excludes the
size of the padding, ticks and labels.
"""
if isinstance(value, (int, float)) and value >= 0:
self.__parameters['coloraxis_colorbar_thickness'] = value
else:
raise ValueError("colorbar_thickness must be an integer >= 0.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR LENMODE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_lenmode(self):
"""Returns the coloraxis_colorbar_lenmode attribute.
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot "fraction"
or in "pixels. Use `len` to set the value.
"""
return self.__parameters['coloraxis_colorbar_lenmode']
@coloraxis_colorbar_lenmode.setter
def coloraxis_colorbar_lenmode(self, value):
"""Sets the coloraxis_colorbar_lenmode attribute.
Parameters
----------
        value : str. One of 'fraction' or 'pixels'. Default = 'fraction'.
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot "fraction"
or in "pixels. Use `len` to set the value.
"""
valid_values = ['fraction', 'pixels']
if value in valid_values:
self.__parameters['coloraxis_colorbar_lenmode'] = value
else:
raise ValueError("colorbar_lenmode must be either 'fraction', \
or 'pixels'.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR LEN PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_len(self):
"""Returns the coloraxis_colorbar_len attribute.
        Sets the length of the color bar. This measure excludes the padding
of both ends. That is, the color bar length is this length minus
the padding on both ends.
"""
return self.__parameters['coloraxis_colorbar_len']
@coloraxis_colorbar_len.setter
def coloraxis_colorbar_len(self, value):
"""Sets the coloraxis_colorbar_len attribute.
Parameters
----------
        value : number >= 0. Default = 1
            Sets the length of the color bar. This measure excludes the padding
of both ends. That is, the color bar length is this length minus
the padding on both ends.
"""
if isinstance(value, (int, float)) and value >= 0:
self.__parameters['coloraxis_colorbar_len'] = value
else:
raise ValueError("colorbar_len must be an integer >= 0.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR BGCOLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_bgcolor(self):
"""Returns the coloraxis_colorbar_bgcolor attribute.
Sets the color of padded area.
"""
return self.__parameters['coloraxis_colorbar_bgcolor']
@coloraxis_colorbar_bgcolor.setter
def coloraxis_colorbar_bgcolor(self, value):
"""Sets the coloraxis_colorbar_bgcolor attribute.
Parameters
----------
value : color. Default = "rgba(0,0,0,0)"
Sets the color of padded area.
"""
if isinstance(value, str):
self.__parameters['coloraxis_colorbar_bgcolor'] = value
else:
raise TypeError("value must be a string")
# --------------------------------------------------------------------------- #
# CanvasColorAxisBarPosition #
# --------------------------------------------------------------------------- #
class CanvasColorAxisBarPosition(CanvasComponent):
"""Configuration options for plot colors."""
DEFAULTS = {
'coloraxis_colorbar_x' : 1.02,
'coloraxis_colorbar_y' : 0.5,
'coloraxis_colorbar_xanchor' : 'left',
'coloraxis_colorbar_yanchor' : 'middle',
'coloraxis_colorbar_xpad' : 10,
'coloraxis_colorbar_ypad' : 10
}
def __init__(self):
self.__parameters = {}
self.__parameters['coloraxis_colorbar_x'] = 1.02
self.__parameters['coloraxis_colorbar_y'] = 0.5
self.__parameters['coloraxis_colorbar_xanchor'] = 'left'
self.__parameters['coloraxis_colorbar_yanchor'] = 'middle'
self.__parameters['coloraxis_colorbar_xpad'] = 10
self.__parameters['coloraxis_colorbar_ypad'] = 10
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['coloraxis_colorbar_x'] = self.DEFAULTS["coloraxis_colorbar_x"]
self.__parameters['coloraxis_colorbar_y'] = self.DEFAULTS["coloraxis_colorbar_y"]
self.__parameters['coloraxis_colorbar_xanchor'] = self.DEFAULTS["coloraxis_colorbar_xanchor"]
self.__parameters['coloraxis_colorbar_yanchor'] = self.DEFAULTS["coloraxis_colorbar_yanchor"]
self.__parameters['coloraxis_colorbar_xpad'] = self.DEFAULTS["coloraxis_colorbar_xpad"]
self.__parameters['coloraxis_colorbar_ypad'] = self.DEFAULTS["coloraxis_colorbar_ypad"]
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR X PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_x(self):
"""Returns the coloraxis_colorbar_x attribute.
Sets the x position of the color bar (in plot fraction).
"""
return self.__parameters['coloraxis_colorbar_x']
@coloraxis_colorbar_x.setter
def coloraxis_colorbar_x(self, value):
"""Sets the coloraxis_colorbar_x attribute.
Parameters
----------
        value : number between -2 and 3. Default = 1.02
Sets the x position of the color bar (in plot fraction).
"""
if isinstance(value, (int, float)) and value >= -2 and value <= 3:
self.__parameters['coloraxis_colorbar_x'] = value
else:
raise ValueError("colorbar_x must be an integer between \
-2 and 3 inclusive.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR Y PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_y(self):
"""Returns the coloraxis_colorbar_y attribute.
        Sets the y position of the color bar (in plot fraction).
"""
return self.__parameters['coloraxis_colorbar_y']
@coloraxis_colorbar_y.setter
def coloraxis_colorbar_y(self, value):
"""Sets the coloraxis_colorbar_y attribute.
Parameters
----------
        value : number between -2 and 3. Default = 0.5
            Sets the y position of the color bar (in plot fraction).
"""
if isinstance(value, (int, float)) and value >= -2 and value <= 3:
self.__parameters['coloraxis_colorbar_y'] = value
else:
raise ValueError("colorbar_y must be an integer between \
-2 and 3 inclusive.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR XANCHOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_xanchor(self):
"""Returns the coloraxis_colorbar_xanchor attribute.
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of the
color bar.
"""
return self.__parameters['coloraxis_colorbar_xanchor']
@coloraxis_colorbar_xanchor.setter
def coloraxis_colorbar_xanchor(self, value):
"""Sets the coloraxis_colorbar_xanchor attribute.
Parameters
----------
value : str. One of 'left', 'center', 'right'. Default = 'left'
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of the
color bar.
"""
valid_values = ['left', 'center', 'right']
if value in valid_values:
self.__parameters['coloraxis_colorbar_xanchor'] = value
else:
raise ValueError("colorbar_xanchor must be either 'left', \
'center', or 'right'.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR YANCHOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_yanchor(self):
"""Returns the coloraxis_colorbar_yanchor attribute.
        Sets this color bar's vertical position anchor. This anchor binds
the `y` position to the "top", "middle" or "bottom" of the color bar.
"""
return self.__parameters['coloraxis_colorbar_yanchor']
@coloraxis_colorbar_yanchor.setter
def coloraxis_colorbar_yanchor(self, value):
"""Sets the coloraxis_colorbar_yanchor attribute.
Parameters
----------
value : str. One of 'top', 'middle', 'bottom'. Default = 'middle'
            Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom"
of the color bar.
"""
valid_values = ['middle', 'bottom', 'top']
if value in valid_values:
self.__parameters['coloraxis_colorbar_yanchor'] = value
else:
raise ValueError("colorbar_yanchor must be either 'middle', \
'bottom', or 'top'.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR XPAD PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_xpad(self):
"""Returns the coloraxis_colorbar_xpad attribute.
Sets the amount of padding (in px) along the x direction.
"""
return self.__parameters['coloraxis_colorbar_xpad']
@coloraxis_colorbar_xpad.setter
def coloraxis_colorbar_xpad(self, value):
"""Sets the coloraxis_colorbar_xpad attribute.
Parameters
----------
value : int. Default = 10
Sets the amount of padding (in px) along the x direction.
"""
if isinstance(value, (int, float)) and value >= 0:
self.__parameters['coloraxis_colorbar_xpad'] = value
else:
raise ValueError("colorbar_xpad must be an integer >= 0.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR YPAD PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_ypad(self):
"""Returns the coloraxis_colorbar_ypad attribute.
Sets the amount of padding (in px) along the y direction.
"""
return self.__parameters['coloraxis_colorbar_ypad']
@coloraxis_colorbar_ypad.setter
def coloraxis_colorbar_ypad(self, value):
"""Sets the coloraxis_colorbar_ypad attribute.
Parameters
----------
value : int. Default = 10
Sets the amount of padding (in px) along the y direction.
"""
if isinstance(value, (int, float)) and value >= 0:
self.__parameters['coloraxis_colorbar_ypad'] = value
else:
raise ValueError("colorbar_ypad must be an integer >= 0.")
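# A minimal usage sketch for CanvasColorAxisBarPosition (hypothetical helper,
# never called). The x/y values are plot fractions in [-2, 3]; the anchors
# say which edge of the bar those coordinates refer to.
def _example_colorbar_position():
    position = CanvasColorAxisBarPosition()
    position.coloraxis_colorbar_x = 1.05
    position.coloraxis_colorbar_xanchor = 'left'    # x refers to the bar's left edge
    position.coloraxis_colorbar_y = 0.5
    position.coloraxis_colorbar_yanchor = 'middle'  # y refers to the bar's middle
    position.coloraxis_colorbar_xpad = 10           # padding in px
    position.coloraxis_colorbar_ypad = 10
    return position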
# --------------------------------------------------------------------------- #
# CanvasColorAxisBarBoundary #
# --------------------------------------------------------------------------- #
class CanvasColorAxisBarBoundary(CanvasComponent):
"""Configuration options for plot colors."""
DEFAULTS = {
'coloraxis_colorbar_outlinecolor' : '#444',
'coloraxis_colorbar_outlinewidth' : 1,
'coloraxis_colorbar_bordercolor' : '#444',
'coloraxis_colorbar_borderwidth' : 0
}
def __init__(self):
self.__parameters = {}
self.__parameters['coloraxis_colorbar_outlinecolor'] = '#444'
self.__parameters['coloraxis_colorbar_outlinewidth'] = 1
self.__parameters['coloraxis_colorbar_bordercolor'] = '#444'
self.__parameters['coloraxis_colorbar_borderwidth'] = 0
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['coloraxis_colorbar_outlinecolor'] = self.DEFAULTS["coloraxis_colorbar_outlinecolor"]
self.__parameters['coloraxis_colorbar_outlinewidth'] = self.DEFAULTS["coloraxis_colorbar_outlinewidth"]
self.__parameters['coloraxis_colorbar_bordercolor'] = self.DEFAULTS["coloraxis_colorbar_bordercolor"]
self.__parameters['coloraxis_colorbar_borderwidth'] = self.DEFAULTS["coloraxis_colorbar_borderwidth"]
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR OUTLINECOLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_outlinecolor(self):
"""Returns the coloraxis_colorbar_outlinecolor attribute.
Sets the axis line color.
"""
return self.__parameters['coloraxis_colorbar_outlinecolor']
@coloraxis_colorbar_outlinecolor.setter
def coloraxis_colorbar_outlinecolor(self, value):
"""Sets the coloraxis_colorbar_outlinecolor attribute.
Parameters
----------
value : str. Default = '#444'
Sets the axis line color.
"""
if isinstance(value, str):
self.__parameters['coloraxis_colorbar_outlinecolor'] = value
else:
raise TypeError("value must be a string.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR OUTLINEWIDTH PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_outlinewidth(self):
"""Returns the coloraxis_colorbar_outlinewidth attribute.
Sets the width (in px) of the axis line.
"""
return self.__parameters['coloraxis_colorbar_outlinewidth']
@coloraxis_colorbar_outlinewidth.setter
def coloraxis_colorbar_outlinewidth(self, value):
"""Sets the coloraxis_colorbar_outlinewidth attribute.
Parameters
----------
value : int greater than or equal to 0. Default = 1
Sets the width (in px) of the axis line.
"""
if isinstance(value, (int, float)) and value >= 0:
self.__parameters['coloraxis_colorbar_outlinewidth'] = value
else:
raise ValueError("colorbar_outlinewidth must be an integer >= 0.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR BORDERCOLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_bordercolor(self):
"""Returns the coloraxis_colorbar_bordercolor attribute.
        Sets the color of the border enclosing this color bar.
"""
return self.__parameters['coloraxis_colorbar_bordercolor']
@coloraxis_colorbar_bordercolor.setter
def coloraxis_colorbar_bordercolor(self, value):
"""Sets the coloraxis_colorbar_bordercolor attribute.
Parameters
----------
value : str. Default = '#444'
            Sets the color of the border enclosing this color bar.
"""
if isinstance(value, str):
self.__parameters['coloraxis_colorbar_bordercolor'] = value
else:
raise TypeError("value must be a string.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR BORDERWIDTH PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_borderwidth(self):
"""Returns the coloraxis_colorbar_borderwidth attribute.
        Sets the width (in px) of the border enclosing this color bar.
"""
return self.__parameters['coloraxis_colorbar_borderwidth']
@coloraxis_colorbar_borderwidth.setter
def coloraxis_colorbar_borderwidth(self, value):
"""Sets the coloraxis_colorbar_borderwidth attribute.
Parameters
----------
value : int greater than or equal to 0. Default = 0
            Sets the width (in px) of the border enclosing this color bar.
"""
if isinstance(value, (int, float)) and value >= 0:
self.__parameters['coloraxis_colorbar_borderwidth'] = value
else:
raise ValueError("colorbar_borderwidth must be an integer >= 0.")
# --------------------------------------------------------------------------- #
# CanvasColorAxisBarTicks #
# --------------------------------------------------------------------------- #
class CanvasColorAxisBarTicks(CanvasComponent):
"""Configuration options for plot colors."""
DEFAULTS = {
'coloraxis_colorbar_tickmode' : "array",
'coloraxis_colorbar_nticks' : 0,
'coloraxis_colorbar_tick0' : None,
'coloraxis_colorbar_dtick' : None,
'coloraxis_colorbar_tickvals' : None,
'coloraxis_colorbar_ticktext' : [""],
'coloraxis_colorbar_ticks' : None
}
def __init__(self):
self.__parameters = {}
self.__parameters['coloraxis_colorbar_tickmode'] = "array"
self.__parameters['coloraxis_colorbar_nticks'] = 0
self.__parameters['coloraxis_colorbar_tick0'] = None
self.__parameters['coloraxis_colorbar_dtick'] = None
self.__parameters['coloraxis_colorbar_tickvals'] = None
self.__parameters['coloraxis_colorbar_ticktext'] = [""]
self.__parameters['coloraxis_colorbar_ticks'] = None
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['coloraxis_colorbar_tickmode'] = self.DEFAULTS["coloraxis_colorbar_tickmode"]
self.__parameters['coloraxis_colorbar_nticks'] = self.DEFAULTS["coloraxis_colorbar_nticks"]
self.__parameters['coloraxis_colorbar_tick0'] = self.DEFAULTS["coloraxis_colorbar_tick0"]
self.__parameters['coloraxis_colorbar_dtick'] = self.DEFAULTS["coloraxis_colorbar_dtick"]
self.__parameters['coloraxis_colorbar_tickvals'] = self.DEFAULTS["coloraxis_colorbar_tickvals"]
self.__parameters['coloraxis_colorbar_ticktext'] = self.DEFAULTS["coloraxis_colorbar_ticktext"]
self.__parameters['coloraxis_colorbar_ticks'] = self.DEFAULTS["coloraxis_colorbar_ticks"]
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKMODE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_tickmode(self):
"""Returns the coloraxis_colorbar_tickmode attribute.
Sets the tick mode for this axis. If "auto", the number of ticks is
set via `nticks`. If "linear", the placement of the ticks is
determined by a starting position `tick0` and a tick step `dtick`
("linear" is the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via `tickvals` and the
tick text is `ticktext`. ("array" is the default value if
`tickvals` is provided).
"""
return self.__parameters['coloraxis_colorbar_tickmode']
@coloraxis_colorbar_tickmode.setter
def coloraxis_colorbar_tickmode(self, value):
"""Sets the coloraxis_colorbar_tickmode attribute.
Parameters
----------
value : str. One of 'auto', 'linear', or 'array'. Default = "array"
Sets the tick mode for this axis. If "auto", the number of ticks is
set via `nticks`. If "linear", the placement of the ticks is
determined by a starting position `tick0` and a tick step `dtick`
("linear" is the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via `tickvals` and the
tick text is `ticktext`. ("array" is the default value if
`tickvals` is provided).
"""
valid_values = ['auto', 'linear', 'array']
if value in valid_values:
self.__parameters['coloraxis_colorbar_tickmode'] = value
else:
raise ValueError("'colorbar_tickmode' must be either \
'auto', 'linear', oir 'array'.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR NTICKS PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_nticks(self):
"""Returns the coloraxis_colorbar_nticks attribute.
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be less
than or equal to `nticks`. Has an effect only if `tickmode` is set
to "auto".
"""
return self.__parameters['coloraxis_colorbar_nticks']
@coloraxis_colorbar_nticks.setter
def coloraxis_colorbar_nticks(self, value):
"""Sets the coloraxis_colorbar_nticks attribute.
Parameters
----------
value : int greater than or equal to 0. Default = 0
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be less
than or equal to `nticks`. Has an effect only if `tickmode` is set
to "auto".
"""
if isinstance(value, (int, float)) and value >= 0:
self.__parameters['coloraxis_colorbar_nticks'] = value
else:
raise ValueError("colorbar_nticks must be a number >= 0.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICK0 PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_tick0(self):
"""Returns the coloraxis_colorbar_tick0 attribute.
Sets the placement of the first tick on this axis. Use with `dtick`.
If the axis `type` is "log", then you must take the log of your
starting tick (e.g. to set the starting tick to 100, set the `tick0`
to 2) except when `dtick`="L<f>" (see `dtick` for more info). If
the axis `type` is "date", it should be a date string, like date
data. If the axis `type` is "category", it should be a number,
using the scale where each category is assigned a serial number
from zero in the order it appears.
"""
return self.__parameters['coloraxis_colorbar_tick0']
@coloraxis_colorbar_tick0.setter
def coloraxis_colorbar_tick0(self, value):
"""Sets the coloraxis_colorbar_tick0 attribute.
Parameters
----------
value : int or str
Sets the placement of the first tick on this axis. Use with `dtick`.
If the axis `type` is "log", then you must take the log of your
starting tick (e.g. to set the starting tick to 100, set the `tick0`
to 2) except when `dtick`="L<f>" (see `dtick` for more info). If
the axis `type` is "date", it should be a date string, like date
data. If the axis `type` is "category", it should be a number,
using the scale where each category is assigned a serial number
from zero in the order it appears.
"""
self.__parameters['coloraxis_colorbar_tick0'] = value
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR DTICK PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_dtick(self):
"""Returns the coloraxis_colorbar_dtick attribute.
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to "log"
and "date" axes. If the axis `type` is "log", then ticks are set
every 10^(n"dtick) where n is the tick number. For example, to set
a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at
1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433.
"log" has several special values; "L<f>", where `f` is a positive
number, gives ticks linearly spaced in value (but not position).
        For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1,
0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between,
use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for
"D1" and "D2". If the axis `type` is "date", then you must convert
the time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has special
values "M<n>" gives ticks spaced by a number of months. `n` must be
a positive integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every
4 years, set `dtick` to "M48"
"""
return self.__parameters['coloraxis_colorbar_dtick']
@coloraxis_colorbar_dtick.setter
def coloraxis_colorbar_dtick(self, value):
"""Sets the coloraxis_colorbar_dtick attribute.
Parameters
----------
value : int or str
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to "log"
and "date" axes. If the axis `type` is "log", then ticks are set
every 10^(n"dtick) where n is the tick number. For example, to set
a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at
1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433.
"log" has several special values; "L<f>", where `f` is a positive
number, gives ticks linearly spaced in value (but not position).
            For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1,
0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between,
use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for
"D1" and "D2". If the axis `type` is "date", then you must convert
the time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has special
values "M<n>" gives ticks spaced by a number of months. `n` must be
a positive integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every
4 years, set `dtick` to "M48"
"""
self.__parameters['coloraxis_colorbar_dtick'] = value
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKVALS PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_tickvals(self):
"""Returns the coloraxis_colorbar_tickvals attribute.
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`
"""
return self.__parameters['coloraxis_colorbar_tickvals']
@coloraxis_colorbar_tickvals.setter
def coloraxis_colorbar_tickvals(self, value):
"""Sets the coloraxis_colorbar_tickvals attribute.
Parameters
----------
value : array-like numbers, strings, or datetimes.
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`
"""
self.__parameters['coloraxis_colorbar_tickvals'] = value
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKTEXT PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_ticktext(self):
"""Returns the coloraxis_colorbar_ticktext attribute.
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array".
Used with `tickvals`.
"""
return self.__parameters['coloraxis_colorbar_ticktext']
@coloraxis_colorbar_ticktext.setter
def coloraxis_colorbar_ticktext(self, value):
"""Sets the coloraxis_colorbar_ticktext attribute.
Parameters
----------
value : array-like numbers, strings, or datetimes.
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array".
Used with `tickvals`.
"""
if isinstance(value, (pd.Series, np.ndarray, list, tuple)):
self.__parameters['coloraxis_colorbar_ticktext'] = value
else:
raise TypeError("value must be an array, tuple, list, numpy array \
or pandas Series.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKS PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_ticks(self):
"""Returns the coloraxis_colorbar_ticks attribute.
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
"""
return self.__parameters['coloraxis_colorbar_ticks']
@coloraxis_colorbar_ticks.setter
def coloraxis_colorbar_ticks(self, value):
"""Sets the coloraxis_colorbar_ticks attribute.
Parameters
----------
value : str. One of 'outside', 'inside', and "". Default = ""
Determines whether ticks are drawn or not. If "", this axis'
            ticks are not drawn. If "outside" ("inside"), this axis' ticks are
drawn outside (inside) the axis lines.
"""
valid_values = ['outside', 'inside', ""]
if value in valid_values:
self.__parameters['coloraxis_colorbar_ticks'] = value
else:
raise ValueError("colorbar_ticks must be either 'outside', \
'inside', or ''.")
# --------------------------------------------------------------------------- #
# CanvasColorAxisBarTickStyle #
# --------------------------------------------------------------------------- #
class CanvasColorAxisBarTickStyle(CanvasComponent):
"""Configuration options for plot colors."""
DEFAULTS = {
'coloraxis_colorbar_ticklen' : 5,
'coloraxis_colorbar_tickwidth' : 1,
'coloraxis_colorbar_tickcolor' : '#444',
'coloraxis_colorbar_showticklabels' : True,
'coloraxis_colorbar_tickangle' : "",
'coloraxis_colorbar_tickprefix' : '',
'coloraxis_colorbar_showtickprefix' : 'all',
'coloraxis_colorbar_ticksuffix' : '',
'coloraxis_colorbar_showticksuffix' : 'all'
}
def __init__(self):
self.__parameters = {}
self.__parameters['coloraxis_colorbar_ticklen'] = 5
self.__parameters['coloraxis_colorbar_tickwidth'] = 1
self.__parameters['coloraxis_colorbar_tickcolor'] = '#444'
self.__parameters['coloraxis_colorbar_showticklabels'] = True
self.__parameters['coloraxis_colorbar_tickangle'] = ""
self.__parameters['coloraxis_colorbar_tickprefix'] = ''
self.__parameters['coloraxis_colorbar_showtickprefix'] = 'all'
self.__parameters['coloraxis_colorbar_ticksuffix'] = ''
self.__parameters['coloraxis_colorbar_showticksuffix'] = 'all'
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['coloraxis_colorbar_ticklen'] = self.DEFAULTS["coloraxis_colorbar_ticklen"]
self.__parameters['coloraxis_colorbar_tickwidth'] = self.DEFAULTS["coloraxis_colorbar_tickwidth"]
self.__parameters['coloraxis_colorbar_tickcolor'] = self.DEFAULTS["coloraxis_colorbar_tickcolor"]
self.__parameters['coloraxis_colorbar_showticklabels'] = self.DEFAULTS["coloraxis_colorbar_showticklabels"]
self.__parameters['coloraxis_colorbar_tickangle'] = self.DEFAULTS["coloraxis_colorbar_tickangle"]
self.__parameters['coloraxis_colorbar_tickprefix'] = self.DEFAULTS["coloraxis_colorbar_tickprefix"]
self.__parameters['coloraxis_colorbar_showtickprefix'] = self.DEFAULTS["coloraxis_colorbar_showtickprefix"]
self.__parameters['coloraxis_colorbar_ticksuffix'] = self.DEFAULTS["coloraxis_colorbar_ticksuffix"]
self.__parameters['coloraxis_colorbar_showticksuffix'] = self.DEFAULTS["coloraxis_colorbar_showticksuffix"]
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKLEN PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_ticklen(self):
"""Returns the coloraxis_colorbar_ticklen attribute.
Sets the tick length (in px).
"""
return self.__parameters['coloraxis_colorbar_ticklen']
@coloraxis_colorbar_ticklen.setter
def coloraxis_colorbar_ticklen(self, value):
"""Sets the coloraxis_colorbar_ticklen attribute.
Parameters
----------
value : int >= 0. Default = 5
Sets the tick length (in px).
"""
if isinstance(value, int) and value >= 0:
self.__parameters['coloraxis_colorbar_ticklen'] = value
else:
raise ValueError("colorbar_ticklen must be an integer >= 0.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKWIDTH PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_tickwidth(self):
"""Returns the coloraxis_colorbar_tickwidth attribute.
        Sets the tick width (in px).
"""
return self.__parameters['coloraxis_colorbar_tickwidth']
@coloraxis_colorbar_tickwidth.setter
def coloraxis_colorbar_tickwidth(self, value):
"""Sets the coloraxis_colorbar_tickwidth attribute.
Parameters
----------
value : int >= 0. Default = 1
            Sets the tick width (in px).
"""
if isinstance(value, int) and value >= 0:
self.__parameters['coloraxis_colorbar_tickwidth'] = value
else:
raise ValueError("colorbar_tickwidth must be an integer >= 0.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKCOLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_tickcolor(self):
"""Returns the coloraxis_colorbar_tickcolor attribute.
Sets the tick color.
"""
return self.__parameters['coloraxis_colorbar_tickcolor']
@coloraxis_colorbar_tickcolor.setter
def coloraxis_colorbar_tickcolor(self, value):
"""Sets the coloraxis_colorbar_tickcolor attribute.
Parameters
----------
value : str
Sets the tick color.
"""
self.__parameters['coloraxis_colorbar_tickcolor'] = value
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR SHOWTICKLABELS PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_showticklabels(self):
"""Returns the coloraxis_colorbar_showticklabels attribute.
Determines whether or not the tick labels are drawn.
"""
return self.__parameters['coloraxis_colorbar_showticklabels']
@coloraxis_colorbar_showticklabels.setter
def coloraxis_colorbar_showticklabels(self, value):
"""Sets the coloraxis_colorbar_showticklabels attribute.
Parameters
----------
value : bool. Default = True
Determines whether or not the tick labels are drawn.
"""
if isinstance(value, bool):
self.__parameters['coloraxis_colorbar_showticklabels'] = value
else:
raise TypeError("value must be a boolean, True or False.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKANGLE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_tickangle(self):
"""Returns the coloraxis_colorbar_tickangle attribute.
Sets tick angle.
"""
return self.__parameters['coloraxis_colorbar_tickangle']
@coloraxis_colorbar_tickangle.setter
def coloraxis_colorbar_tickangle(self, value):
"""Sets the coloraxis_colorbar_tickangle attribute.
Parameters
----------
        value : int, float, or 'auto'
            Sets tick angle.
        """
        if value == 'auto' or isinstance(value, (float, int)):
            self.__parameters['coloraxis_colorbar_tickangle'] = value
        else:
            raise TypeError("value must be 'auto' or a number.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKPREFIX PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_tickprefix(self):
"""Returns the coloraxis_colorbar_tickprefix attribute.
Sets a tick label prefix
"""
return self.__parameters['coloraxis_colorbar_tickprefix']
@coloraxis_colorbar_tickprefix.setter
def coloraxis_colorbar_tickprefix(self, value):
"""Sets the coloraxis_colorbar_tickprefix attribute.
Parameters
----------
value : str
Sets a tick label prefix
"""
self.__parameters['coloraxis_colorbar_tickprefix'] = value
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR SHOWTICKPREFIX PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_showtickprefix(self):
"""Returns the coloraxis_colorbar_showtickprefix attribute.
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix.
If "last", only the last tick is displayed with a suffix.
If "none", tick prefixes are hidden.
"""
return self.__parameters['coloraxis_colorbar_showtickprefix']
@coloraxis_colorbar_showtickprefix.setter
def coloraxis_colorbar_showtickprefix(self, value):
"""Sets the coloraxis_colorbar_showtickprefix attribute.
Parameters
----------
value : str
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix.
If "last", only the last tick is displayed with a suffix.
If "none", tick prefixes are hidden.
"""
valid_values = ['all', 'first', 'last', 'none']
if value in valid_values:
self.__parameters['coloraxis_colorbar_showtickprefix'] = value
else:
raise ValueError("showtickprefix must be 'all', 'first', 'last'\
, or 'none'.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKSUFFIX PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_ticksuffix(self):
"""Returns the coloraxis_colorbar_ticksuffix attribute.
Sets a tick label suffix
"""
return self.__parameters['coloraxis_colorbar_ticksuffix']
@coloraxis_colorbar_ticksuffix.setter
def coloraxis_colorbar_ticksuffix(self, value):
"""Sets the coloraxis_colorbar_ticksuffix attribute.
Parameters
----------
value : str
Sets a tick label suffix
"""
self.__parameters['coloraxis_colorbar_ticksuffix'] = value
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR SHOWTICKSUFFIX PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_showticksuffix(self):
"""Returns the coloraxis_colorbar_showticksuffix attribute.
If "all", all tick labels are displayed with a suffix. If
"first", only the first tick is displayed with a suffix.
If "last", only the last tick is displayed with a suffix.
If "none", tick suffixes are hidden.
"""
return self.__parameters['coloraxis_colorbar_showticksuffix']
@coloraxis_colorbar_showticksuffix.setter
def coloraxis_colorbar_showticksuffix(self, value):
"""Sets the coloraxis_colorbar_showticksuffix attribute.
Parameters
----------
value : str
If "all", all tick labels are displayed with a suffix. If
"first", only the first tick is displayed with a suffix.
If "last", only the last tick is displayed with a suffix.
If "none", tick suffixes are hidden.
"""
valid_values = ['all', 'first', 'last', 'none']
if value in valid_values:
self.__parameters['coloraxis_colorbar_showticksuffix'] = value
else:
raise ValueError("showticksuffix must be 'all', 'first', 'last'\
, or 'none'.")
# --------------------------------------------------------------------------- #
# CanvasColorAxisBarTickFont #
# --------------------------------------------------------------------------- #
class CanvasColorAxisBarTickFont(CanvasComponent):
"""Configuration options for plot colors."""
DEFAULTS = {
'coloraxis_colorbar_tickfont_family' : None,
'coloraxis_colorbar_tickfont_size' : 1,
'coloraxis_colorbar_tickfont_color' : None
}
def __init__(self):
self.__parameters = {}
self.__parameters['coloraxis_colorbar_tickfont_family'] = None
self.__parameters['coloraxis_colorbar_tickfont_size'] = 1
self.__parameters['coloraxis_colorbar_tickfont_color'] = None
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['coloraxis_colorbar_tickfont_family'] = self.DEFAULTS["coloraxis_colorbar_tickfont_family"]
self.__parameters['coloraxis_colorbar_tickfont_size'] = self.DEFAULTS["coloraxis_colorbar_tickfont_size"]
self.__parameters['coloraxis_colorbar_tickfont_color'] = self.DEFAULTS["coloraxis_colorbar_tickfont_color"]
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKFONT_FAMILY PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_tickfont_family(self):
"""Returns the coloraxis_colorbar_tickfont_family attribute.
Sets tick font family.
"""
return self.__parameters['coloraxis_colorbar_tickfont_family']
@coloraxis_colorbar_tickfont_family.setter
def coloraxis_colorbar_tickfont_family(self, value):
"""Sets the coloraxis_colorbar_tickfont_family attribute.
Parameters
----------
value : str
Sets tick font family.
"""
if isinstance(value, str):
self.__parameters['coloraxis_colorbar_tickfont_family'] = value
else:
raise TypeError("value must be a string.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKFONT_SIZE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_tickfont_size(self):
"""Returns the coloraxis_colorbar_tickfont_size attribute.
Sets tick font size.
"""
return self.__parameters['coloraxis_colorbar_tickfont_size']
@coloraxis_colorbar_tickfont_size.setter
def coloraxis_colorbar_tickfont_size(self, value):
"""Sets the coloraxis_colorbar_tickfont_size attribute.
Parameters
----------
value : int
Sets tick font size.
"""
if isinstance(value, int) and value >= 1:
self.__parameters['coloraxis_colorbar_tickfont_size'] = value
else:
raise ValueError("value must be an integer >= 1.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TICKFONT_COLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_tickfont_color(self):
"""Returns the coloraxis_colorbar_tickfont_color attribute.
Sets tick font color.
"""
return self.__parameters['coloraxis_colorbar_tickfont_color']
@coloraxis_colorbar_tickfont_color.setter
def coloraxis_colorbar_tickfont_color(self, value):
"""Sets the coloraxis_colorbar_tickfont_color attribute.
Parameters
----------
value : str
Sets tick font color.
"""
if isinstance(value, str):
self.__parameters['coloraxis_colorbar_tickfont_color'] = value
else:
raise TypeError("value must be a string.")
# --------------------------------------------------------------------------- #
# CanvasColorAxisBarNumbers #
# --------------------------------------------------------------------------- #
class CanvasColorAxisBarNumbers(CanvasComponent):
"""Configuration options for plot colors."""
DEFAULTS = {
'coloraxis_colorbar_separatethousands' : True,
'coloraxis_colorbar_exponentformat' : 'B',
'coloraxis_colorbar_showexponent' : 'all'
}
def __init__(self):
self.__parameters = {}
self.__parameters['coloraxis_colorbar_separatethousands'] = True
self.__parameters['coloraxis_colorbar_exponentformat'] = 'B'
self.__parameters['coloraxis_colorbar_showexponent'] = 'all'
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['coloraxis_colorbar_separatethousands'] = self.DEFAULTS["coloraxis_colorbar_separatethousands"]
self.__parameters['coloraxis_colorbar_exponentformat'] = self.DEFAULTS["coloraxis_colorbar_exponentformat"]
self.__parameters['coloraxis_colorbar_showexponent'] = self.DEFAULTS["coloraxis_colorbar_showexponent"]
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR SEPARATETHOUSANDS PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_separatethousands(self):
"""Returns the coloraxis_colorbar_separatethousands attribute.
If "True", even 4-digit integers are separated.
"""
return self.__parameters['coloraxis_colorbar_separatethousands']
@coloraxis_colorbar_separatethousands.setter
def coloraxis_colorbar_separatethousands(self, value):
"""Sets the coloraxis_colorbar_separatethousands attribute.
Parameters
----------
value : bool
If "True", even 4-digit integers are separated.
"""
if isinstance(value, bool):
self.__parameters['coloraxis_colorbar_separatethousands'] = value
else:
raise TypeError("value must be a boolean, True or False.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR EXPONENTFORMAT PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_exponentformat(self):
"""Returns the coloraxis_colorbar_exponentformat attribute.
Determines a formatting rule for the tick exponents.
"""
return self.__parameters['coloraxis_colorbar_exponentformat']
@coloraxis_colorbar_exponentformat.setter
def coloraxis_colorbar_exponentformat(self, value):
"""Sets the coloraxis_colorbar_exponentformat attribute.
Parameters
----------
        value : str. One of "none" | "e" | "E" | "power" | "SI" | "B"
Determines a formatting rule for the tick exponents.
"""
valid_values = ["none", "e", "E", "power", "SI", "B"]
if value in valid_values:
self.__parameters['coloraxis_colorbar_exponentformat'] = value
else:
raise ValueError("exponentformat must be 'none', 'e'\
, 'E', 'power', 'SI', or 'B'.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR SHOWEXPONENT PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_showexponent(self):
"""Returns the coloraxis_colorbar_showexponent attribute.
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If "last",
only the exponent of the last tick is shown. If "none",
no exponents appear.
"""
return self.__parameters['coloraxis_colorbar_showexponent']
@coloraxis_colorbar_showexponent.setter
def coloraxis_colorbar_showexponent(self, value):
"""Sets the coloraxis_colorbar_showexponent attribute.
Parameters
----------
        value : str. One of "all" | "first" | "last" | "none"
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If "last",
only the exponent of the last tick is shown. If "none",
no exponents appear.
"""
valid_values = ['all', 'first', 'last', 'none']
if value in valid_values:
self.__parameters['coloraxis_colorbar_showexponent'] = value
else:
raise ValueError("showexponent must be 'all', 'first', 'last'\
, 'none'.")
# --------------------------------------------------------------------------- #
# CanvasColorAxisBarTitle #
# --------------------------------------------------------------------------- #
class CanvasColorAxisBarTitle(CanvasComponent):
"""Configuration options for plot colors."""
DEFAULTS = {
'coloraxis_colorbar_title_text' : "",
'coloraxis_colorbar_title_font_family' : None,
'coloraxis_colorbar_title_font_size' : 1,
'coloraxis_colorbar_title_font_color' : None,
'coloraxis_colorbar_title_side' : 'top'
}
def __init__(self):
self.__parameters = {}
self.__parameters['coloraxis_colorbar_title_text'] = ""
self.__parameters['coloraxis_colorbar_title_font_family'] = None
self.__parameters['coloraxis_colorbar_title_font_size'] = 1
self.__parameters['coloraxis_colorbar_title_font_color'] = None
self.__parameters['coloraxis_colorbar_title_side'] = 'top'
def reset(self):
"""Sets parameters back to their defaults."""
self.__parameters = {}
self.__parameters['coloraxis_colorbar_title_text'] = self.DEFAULTS["coloraxis_colorbar_title_text"]
self.__parameters['coloraxis_colorbar_title_font_family'] = self.DEFAULTS["coloraxis_colorbar_title_font_family"]
self.__parameters['coloraxis_colorbar_title_font_size'] = self.DEFAULTS["coloraxis_colorbar_title_font_size"]
self.__parameters['coloraxis_colorbar_title_font_color'] = self.DEFAULTS["coloraxis_colorbar_title_font_color"]
self.__parameters['coloraxis_colorbar_title_side'] = self.DEFAULTS["coloraxis_colorbar_title_side"]
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TITLE_TEXT PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_title_text(self):
"""Returns the coloraxis_colorbar_title_text attribute.
Sets the title of the color bar.
"""
return self.__parameters['coloraxis_colorbar_title_text']
@coloraxis_colorbar_title_text.setter
def coloraxis_colorbar_title_text(self, value):
"""Sets the coloraxis_colorbar_title_text attribute.
Parameters
----------
value : str
Sets the title of the color bar.
"""
self.__parameters['coloraxis_colorbar_title_text'] = value
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TITLE_FONT_FAMILY PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_title_font_family(self):
"""Returns the coloraxis_colorbar_title_font_family attribute.
Sets the font family of the title of the color bar.
"""
return self.__parameters['coloraxis_colorbar_title_font_family']
@coloraxis_colorbar_title_font_family.setter
def coloraxis_colorbar_title_font_family(self, value):
"""Sets the coloraxis_colorbar_title_font_family attribute.
Parameters
----------
value : str
Sets the font family of the title of the color bar.
"""
self.__parameters['coloraxis_colorbar_title_font_family'] = value
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TITLE_FONT_SIZE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_title_font_size(self):
"""Returns the coloraxis_colorbar_title_font_size attribute.
Sets the font size of the title of the color bar.
"""
return self.__parameters['coloraxis_colorbar_title_font_size']
@coloraxis_colorbar_title_font_size.setter
def coloraxis_colorbar_title_font_size(self, value):
"""Sets the coloraxis_colorbar_title_font_size attribute.
Parameters
----------
        value : int or float >= 1
Sets the font size of the title of the color bar.
"""
if isinstance(value, (int, float)) and value >= 1:
self.__parameters['coloraxis_colorbar_title_font_size'] = value
else:
raise ValueError("value must be an number >= 1.")
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TITLE_FONT_COLOR PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_title_font_color(self):
"""Returns the coloraxis_colorbar_title_font_color attribute.
Sets the font color of the title of the color bar.
"""
return self.__parameters['coloraxis_colorbar_title_font_color']
@coloraxis_colorbar_title_font_color.setter
def coloraxis_colorbar_title_font_color(self, value):
"""Sets the coloraxis_colorbar_title_font_color attribute.
Parameters
----------
value : str
Sets the font color of the title of the color bar.
"""
self.__parameters['coloraxis_colorbar_title_font_color'] = value
# ----------------------------------------------------------------------- #
# COLORAXIS COLORBAR TITLE_SIDE PROPERTIES #
# ----------------------------------------------------------------------- #
@property
def coloraxis_colorbar_title_side(self):
"""Returns the coloraxis_colorbar_title_side attribute.
Determines the location of color bar's title with respect to the
color bar.
"""
return self.__parameters['coloraxis_colorbar_title_side']
@coloraxis_colorbar_title_side.setter
def coloraxis_colorbar_title_side(self, value):
"""Sets the coloraxis_colorbar_title_side attribute.
Parameters
----------
value : str. One of 'right', 'top', 'bottom'. Default = 'top'
Determines the location of color bar's title with respect to the
            color bar.
"""
valid_values = ['right', 'top', 'bottom']
if value in valid_values:
self.__parameters['coloraxis_colorbar_title_side'] = value
else:
raise ValueError("colorbar_title_side must be 'right', 'top', \
or 'bottom'.")
|
the-stack_106_31596 |
import warnings
import logging
logging.basicConfig(level=logging.INFO)
def send_warnings_to_log(message, category, filename, lineno, file=None, line=None):
logging.warning(
'%s:%s: %s:%s' %
(filename, lineno, category.__name__, message))
old_showwarning = warnings.showwarning
warnings.showwarning = send_warnings_to_log
warnings.warn('message')
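# Follow-up sketch: the original hook was saved in old_showwarning above, so
# the redirection can be undone, after which warnings go to stderr again.
warnings.showwarning = old_showwarning
warnings.warn('this one is shown by the default handler again')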
|
the-stack_106_31597 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 14 02:03:52 2017
@author: Frank
In:
In/*.*
Out:
In/*.JPEG
"""
import os
import init as config
from argparse import ArgumentParser
import cv2
from PIL import Image
#from gooey import Gooey
def gif2png(oldname,newname,ext='png'):
im = Image.open(oldname)
im.save(newname,'png', optimize=True, quality=70)
def tif2jpg(oldname,newname):
im = Image.open(oldname)
im.thumbnail(im.size)
im.save(newname, "JPEG", quality=100)
def chgext(in_dir=config.in_dir,
out_dir=config.out_dir,
subdir="",
in_ext=".jpg",
out_ext='.png'):
for filename in os.listdir(in_dir + subdir):
fullpath = in_dir + subdir + filename
newpath = out_dir + subdir + filename
if in_dir == out_dir:
base = os.path.splitext(fullpath)[0]
else:
base = os.path.splitext(newpath)[0]
newpath = base + out_ext
if in_ext == '.gif':
gif2png(fullpath,newpath)
elif in_ext == '.tif':
tif2jpg(fullpath,newpath)
else:
#img=cv2.imread(fullpath)
#cv2.imwrite(newpath,img)
os.rename(fullpath, newpath)
#@Gooey(language='english')
def main():
parser = ArgumentParser(add_help=False)#description='Resize')
parser.add_argument("-s", "--subdir", default='001', help="select which directory")
#args = parser.parse_args()
chgext(in_dir=config.in_dir, out_dir=config.merged_dir, in_ext=".tif", out_ext='.jpg')#args.subdir+'/')
if __name__ == '__main__':
main()
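# Other conversions go through the same helper; for example (directory values
# are placeholders -- the real ones come from the local init/config module):
#   chgext(in_dir='In/', out_dir='Out/', in_ext='.gif', out_ext='.png')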
|
the-stack_106_31601 |
import numpy as np
import copy
import fns
from sklearn.cross_decomposition import PLSRegression
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from . import PLSRsave
from . import PLSRwavelengthSelection
from . import PLSRregressionMethods
class sequentialFeatureSelector():
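    """Sequential (optionally floating) wavenumber selection.

    Starting from no wavenumbers (forward) or all of them (backward), each
    iteration toggles the single wavenumber that yields the lowest loss
    (cross-validation, calibration or prediction error, per 'WS_loss_type').
    When 'SFS_floating' is enabled, steps in the opposite direction are kept
    as long as they keep improving the best loss seen so far.
    """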
def __init__(self,common_variables,ui,case,draw_fun):
self.case=case
self.MLR_reg_module = PLSRregressionMethods.getRegModule('MLR',case.keywords)
self.reg_module = PLSRregressionMethods.getRegModule(ui['reg_type'],case.keywords)
self.ui=ui
#self.independentVariables=con.GAIndependentVariables
'''self.numberOfIndividuals=ui['GA_number_of_individuals']#100#
if self.numberOfIndividuals//2==0:
print('Number of individuals must be odd, as they are mated in pairs, and the best is always kept. Changed to: '+str(ui['GA_number_of_individuals']+1))'''
self.common_variables=common_variables
self.T=copy.deepcopy(case.T)
self.V=copy.deepcopy(case.V)
#cut away excess datapoints as described by user
self.numDatapoints=self.T.X.shape[1]
self.draw_fun=draw_fun
if self.reg_module.type=='regression':
if ui['WS_loss_type']=='X-validation on training':
self.rmse_string='RMSECV'
elif ui['WS_loss_type']=='RMSEC on training':
self.rmse_string='RMSEC'
else: # ui['WS_loss_type']=='RMSEP on validation':
self.rmse_string='RMSEP'
else: #self.reg_module.type=='classifier':
if ui['WS_loss_type']=='X-validation on training':
self.rmse_string='CV % wrong'
elif ui['WS_loss_type']=='RMSEC on training':
self.rmse_string='calib % wrong'
else: # ui['WS_loss_type']=='RMSEP on validation':
self.rmse_string='pred % wrong'
def run(self):
'''if self.ui['SFS type']=='Forward':
return self.forward_selection()
if self.ui['SFS type']=='Backwards':
return self.backwards_selection()
def forward_selection(self):'''
wavenumbers=self.case.wavenumbers
if 'Forward' in self.ui['SFS type']:
direction='Forward '
current_active_wavenumbers=np.zeros(len(wavenumbers), dtype=bool)
elif 'Backward' in self.ui['SFS type']:
direction='Backward'
current_active_wavenumbers=np.ones(len(wavenumbers), dtype=bool)
if self.ui['SFS_floating']:
floating=True
else:
floating=False
ax=fns.add_axis(self.common_variables.fig,self.ui['fig_per_row'],self.ui['max_plots'])
# calculate the needed X-val splits and store them
PLSRwavelengthSelection.WS_getCrossvalSplits([0,1],self.T,self.V,self.ui,use_stored=False)
PLSRsave.PlotChromosomes(ax,wavenumbers,[],self.ui,ylabel='Iteration')
if self.ui['SFS type']=='Forward':
current_active_wavenumbers=np.zeros(len(wavenumbers), dtype=bool)
elif self.ui['SFS type']=='Backwards':
current_active_wavenumbers=np.ones(len(wavenumbers), dtype=bool)
best_historic_active=[]
best_loss=[]
generation=0
while True:
#main step
if direction=='Forward ':
trail_active_wavenumbers=self.get_trails_forward(current_active_wavenumbers)
else: # direction=='Backward'
trail_active_wavenumbers=self.get_trails_backward(current_active_wavenumbers)
if len(trail_active_wavenumbers)==0:
break
trail_active_wavenumbers=cut_previous(trail_active_wavenumbers,best_historic_active)
current_active_wavenumbers, l, out_str= self.do_pass(trail_active_wavenumbers,generation)
print(direction+' '+out_str)
best_loss.append(l)
PLSRsave.PlotChromosome(ax,wavenumbers,current_active_wavenumbers,generation)
self.draw_fun()
best_historic_active.append(copy.copy(current_active_wavenumbers))
best_historic_generation=np.argmin(best_loss)
generation+=1
if generation==self.ui['SFS_max_iterations']:
break
if floating:
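                # Floating phase: try single steps in the opposite direction of
                # the main pass and keep each one only while it strictly lowers
                # the current best loss (sequential floating selection).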
while True:
if direction=='Forward ':
if np.sum(current_active_wavenumbers)==1:
break
else:
trail_active_wavenumbers=self.get_trails_backward(current_active_wavenumbers) #reverse of main loop
else: # direction=='Backward'
if np.sum(current_active_wavenumbers)==len(current_active_wavenumbers):
break
trail_active_wavenumbers=self.get_trails_forward(current_active_wavenumbers) #reverse of main loop
trail_active_wavenumbers=cut_previous(trail_active_wavenumbers,best_historic_active)
if len(trail_active_wavenumbers)==0:
break
best_trail, l, out_str = self.do_pass(trail_active_wavenumbers,generation)
if l<best_loss[-1]:
print('Floating'+' '+out_str)
current_active_wavenumbers=best_trail
best_loss.append(l)
PLSRsave.PlotChromosome(ax,wavenumbers,current_active_wavenumbers,generation)
self.draw_fun()
best_historic_active.append(copy.copy(current_active_wavenumbers))
best_historic_generation=np.argmin(best_loss)
generation+=1
else:
break
if generation==self.ui['SFS_max_iterations'] or best_historic_generation<len(best_loss)-self.ui['SFS_num_after_min'] or np.sum(current_active_wavenumbers)==self.ui['SFS_target']:
break
print('best iteration '+str(best_historic_generation+1)+', best '+self.rmse_string+' = '+PLSRsave.custom_round(best_loss[best_historic_generation],2))
PLSRsave.PlotChromosome(ax,wavenumbers,best_historic_active[best_historic_generation],best_historic_generation,color=[1,0,0,1])
if self.ui['save_check_var']==1:
PLSRsave.PlotChromosomes(self.common_variables.tempax,wavenumbers,best_historic_active,self.ui,ylabel='Iteration')
PLSRsave.PlotChromosome(self.common_variables.tempax,wavenumbers,best_historic_active[best_historic_generation],best_historic_generation,color=[1,0,0,1])
self.common_variables.tempfig.subplots_adjust(bottom=0.13,left=0.15, right=0.97, top=0.9)
unique_keywords=PLSRsave.get_unique_keywords_formatted(self.common_variables.keyword_lists,self.case.keywords)
            plotFileName=self.case.folder+self.ui['reg_type']+unique_keywords.replace('.','p')+'SFS'
self.common_variables.tempfig.savefig(plotFileName.replace('.','p')+self.ui['file_extension'])
return best_historic_active[best_historic_generation]
def get_trails_forward(self,current_active_wavenumbers):
trail_active_wavenumbers=[]
for i,act in enumerate(current_active_wavenumbers):
if act==False:
trail_active_wavenumbers.append(copy.copy(current_active_wavenumbers))
trail_active_wavenumbers[-1][i]=True
return trail_active_wavenumbers
def get_trails_backward(self,current_active_wavenumbers):
trail_active_wavenumbers=[]
for i,act in enumerate(current_active_wavenumbers):
if act==True:
trail_active_wavenumbers.append(copy.copy(current_active_wavenumbers))
trail_active_wavenumbers[-1][i]=False
return trail_active_wavenumbers
def do_pass(self,trail_active_wavenumbers,generation):
losses,used_mlr=PLSRwavelengthSelection.WS_evaluate_chromosomes(self.reg_module,
self.T, self.V, trail_active_wavenumbers,
use_stored=True, backup_reg_module=self.MLR_reg_module)
best_i=np.argmin(losses)
if used_mlr:
out_str='iteration '+str(generation+1)+' done, best '+self.rmse_string+' = '+PLSRsave.custom_round(losses[best_i],2)+' (using MLR)'
return trail_active_wavenumbers[best_i],losses[best_i]+10000,out_str # increase the value of the loss here, because we do not wat to permit the algorithm for choosing this as the best case
else:
out_str='iteration '+str(generation+1)+' done, best '+self.rmse_string+' = '+PLSRsave.custom_round(losses[best_i],2)
return trail_active_wavenumbers[best_i],losses[best_i],out_str
def cut_previous(trail_active_wavenumbers,best_historic_active):
new_trails=[]
for trail in trail_active_wavenumbers:
keep=True
for historic in best_historic_active:
if (trail==historic).all():
keep=False
break
if keep:
new_trails.append(trail)
return new_trails
|
the-stack_106_31602 | # coding: utf-8
from configs.stock import stock_list
from engine.base_engine import BaseEngine
from helpers.quotes import get_realtime_quotes, get_realtime_index, get_realtime_class_index
from gevent import monkey
monkey.patch_all()
class DefaultQuotationEngine(BaseEngine):
"""行情推送引擎"""
EventType = 'quotation'
stock_codes = []
def init(self):
self.stock_codes = stock_list
def fetch_quotation(self):
return get_realtime_quotes(self.stock_codes)
def set_stock_codes(self,code_list=None):
if code_list is not None:
self.stock_codes = code_list
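# Illustrative wiring (a sketch only -- the constructor arguments belong to
# BaseEngine, which lives elsewhere in this project, so they are assumptions):
#
#   engine = DefaultQuotationEngine(event_engine, clock_engine)
#   engine.set_stock_codes(['sz000001', 'sh600000'])
#   quotes = engine.fetch_quotation()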
|
the-stack_106_31604 | # Event: LCCS Python Fundamental Skills Workshop
# Date: Dec 2018
# Author: Joe English, PDST
# eMail: [email protected]
# Purpose: A program to find the maximum of 3 values
# A function to find the largest of 3 numbers
def maxOf3(x, y, z):
    if (x >= y) and (x >= z):
        return x
    elif (y >= x) and (y >= z):
        return y
    else:
        return z
# Test the function
print(maxOf3(1, 2, 3))
print(maxOf3(1, 3, 2))
print(maxOf3(3, 2, 1))
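# For comparison, the built-in max() gives the same answer in one call and
# also handles ties between equal arguments:
print(max(1, 2, 3))
print(max(3, 3, 1))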
|
the-stack_106_31605 | """
===================================================================
Compute MNE inverse solution on evoked data in a mixed source space
===================================================================
Create a mixed source space and compute MNE inverse solution on an
evoked dataset.
"""
# Author: Annalisa Pascarella <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import matplotlib.pyplot as plt
from nilearn import plotting
import mne
from mne.minimum_norm import make_inverse_operator, apply_inverse
# Set dir
data_path = mne.datasets.sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_mixed_src = op.join(bem_dir, '%s-oct-6-mixed-src.fif' % subject)
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_evoked = data_dir + '/sample_audvis-ave.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_fwd = data_dir + '/sample_audvis-meg-oct-6-mixed-fwd.fif'
fname_cov = data_dir + '/sample_audvis-shrunk-cov.fif'
###############################################################################
# Set up our source space
# -----------------------
# List substructures we are interested in. We select only the
# sub structures we want to include in the source space:
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
###############################################################################
# Get a surface-based source space, here with few source points for speed
# in this demonstration, in general you should use oct6 spacing!
src = mne.setup_source_space(subject, spacing='oct5',
add_dist=False, subjects_dir=subjects_dir)
###############################################################################
# Now we create a mixed src space by adding the volume regions specified in the
# list labels_vol. First, read the aseg file and the source space bounds
# using the inner skull surface (here using 10mm spacing to save time,
# we recommend something smaller like 5.0 in actual analyses):
vol_src = mne.setup_volume_source_space(
subject, mri=fname_aseg, pos=10.0, bem=fname_model,
volume_label=labels_vol, subjects_dir=subjects_dir,
add_interpolator=False, # just for speed, usually this should be True
verbose=True)
# Generate the mixed source space
src += vol_src
# Visualize the source space.
src.plot(subjects_dir=subjects_dir)
n = sum(src[i]['nuse'] for i in range(len(src)))
print('the src space contains %d spaces and %d points' % (len(src), n))
###############################################################################
# We could write the mixed source space with::
#
# >>> write_source_spaces(fname_mixed_src, src, overwrite=True)
#
# We can also export source positions to nifti file and visualize it again:
nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)
src.export_volume(nii_fname, mri_resolution=True, overwrite=True)
plotting.plot_img(nii_fname, cmap='nipy_spectral')
###############################################################################
# Compute the fwd matrix
# ----------------------
fwd = mne.make_forward_solution(
fname_evoked, fname_trans, src, fname_bem,
mindist=5.0, # ignore sources<=5mm from innerskull
meg=True, eeg=False, n_jobs=1)
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
src_fwd = fwd['src']
n = sum(src_fwd[i]['nuse'] for i in range(len(src_fwd)))
print('the fwd src space contains %d spaces and %d points' % (len(src_fwd), n))
# Load data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname_evoked, condition=condition,
baseline=(None, 0))
noise_cov = mne.read_cov(fname_cov)
###############################################################################
# Compute inverse solution
# ------------------------
snr = 3.0 # use smaller SNR for raw data
inv_method = 'dSPM' # sLORETA, MNE, dSPM
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
lambda2 = 1.0 / snr ** 2
inverse_operator = make_inverse_operator(evoked.info, fwd, noise_cov,
depth=None, fixed=False)
stc = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
pick_ori=None)
src = inverse_operator['src']
###############################################################################
# Plot the surface
# ----------------
initial_time = 0.1
brain = stc.surface().plot(initial_time=initial_time,
subjects_dir=subjects_dir)
###############################################################################
# Plot the volume
# ----------------
# sphinx_gallery_thumbnail_number = 4
fig = stc.volume().plot(initial_time=initial_time, src=src,
subjects_dir=subjects_dir)
###############################################################################
# Process labels
# --------------
# Average the source estimates within each label of the cortical parcellation
# and each sub structure contained in the src space
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(
subject, parc=parc, subjects_dir=subjects_dir)
label_ts = mne.extract_label_time_course(
[stc], labels_parc, src, mode='mean', allow_empty=True)
# plot the times series of 2 labels
fig, axes = plt.subplots(1)
axes.plot(1e3 * stc.times, label_ts[0][0, :], 'k', label='bankssts-lh')
axes.plot(1e3 * stc.times, label_ts[0][71, :].T, 'r', label='Brain-stem')
axes.set(xlabel='Time (ms)', ylabel='MNE current (nAm)')
axes.legend()
mne.viz.tight_layout()
|
the-stack_106_31607 | import json
import re
from .utils import get_page
from pyquery import PyQuery as pq
class ProxyMetaclass(type):
def __new__(cls, name, bases, attrs):
count = 0
attrs['__CrawlFunc__'] = []
for k, v in attrs.items():
if 'crawl_' in k:
attrs['__CrawlFunc__'].append(k)
count += 1
attrs['__CrawlFuncCount__'] = count
return type.__new__(cls, name, bases, attrs)
class Crawler(object, metaclass=ProxyMetaclass):
def get_proxies(self, callback):
proxies = []
for proxy in eval("self.{}()".format(callback)):
            print('Successfully fetched proxy', proxy)
proxies.append(proxy)
return proxies
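    # Example call (requires network access): the metaclass above registers
    # every attribute whose name contains 'crawl_', so with the quoted-out
    # blocks below only crawl_xroxy is collected.
    #
    #   Crawler().get_proxies('crawl_xroxy')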
'''
def crawl_daxiang(self):
url = 'http://vtp.daxiangdaili.com/ip/?tid=559363191592228&num=50&filter=on'
html = get_page(url)
if html:
urls = html.split('\n')
for url in urls:
yield url
def crawl_daili66(self, page_count=4):
"""
        Fetch proxies from daili66 (66ip.cn)
        :param page_count: page number
        :return: proxies
"""
start_url = 'http://www.66ip.cn/{}.html'
urls = [start_url.format(page) for page in range(1, page_count + 1)]
for url in urls:
print('Crawling', url)
html = get_page(url)
if html:
doc = pq(html)
trs = doc('.containerbox table tr:gt(0)').items()
for tr in trs:
ip = tr.find('td:nth-child(1)').text()
port = tr.find('td:nth-child(2)').text()
yield ':'.join([ip, port])
def crawl_proxy360(self):
"""
        Fetch proxies from Proxy360
        :return: proxies
"""
start_url = 'http://www.proxy360.cn/Region/China'
print('Crawling', start_url)
html = get_page(start_url)
if html:
doc = pq(html)
lines = doc('div[name="list_proxy_ip"]').items()
for line in lines:
ip = line.find('.tbBottomLine:nth-child(1)').text()
port = line.find('.tbBottomLine:nth-child(2)').text()
yield ':'.join([ip, port])
def crawl_goubanjia(self):
"""
        Fetch proxies from Goubanjia
        :return: proxies
"""
start_url = 'http://www.goubanjia.com/free/gngn/index.shtml'
html = get_page(start_url)
if html:
doc = pq(html)
tds = doc('td.ip').items()
for td in tds:
td.find('p').remove()
yield td.text().replace(' ', '')
def crawl_ip181(self):
start_url = 'http://www.ip181.com/'
html = get_page(start_url)
ip_address = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
        # \s* matches whitespace so the pattern can span line breaks
re_ip_address = ip_address.findall(html)
for address,port in re_ip_address:
result = address + ':' + port
yield result.replace(' ', '')
def crawl_ip3366(self):
for page in range(1, 4):
start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)
html = get_page(start_url)
ip_address = re.compile('<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
            # \s* matches whitespace so the pattern can span line breaks
re_ip_address = ip_address.findall(html)
for address, port in re_ip_address:
result = address+':'+ port
yield result.replace(' ', '')
def crawl_kxdaili(self):
for i in range(1, 11):
start_url = 'http://www.kxdaili.com/ipList/{}.html#ip'.format(i)
html = get_page(start_url)
ip_address = re.compile('<tr.*?>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
            # \s* matches whitespace so the pattern can span line breaks
re_ip_address = ip_address.findall(html)
for address, port in re_ip_address:
result = address + ':' + port
yield result.replace(' ', '')
def crawl_premproxy(self):
for i in ['China-01','China-02','China-03','China-04','Taiwan-01']:
start_url = 'https://premproxy.com/proxy-by-country/{}.htm'.format(i)
html = get_page(start_url)
if html:
ip_address = re.compile('<td data-label="IP:port ">(.*?)</td>')
re_ip_address = ip_address.findall(html)
for address_port in re_ip_address:
yield address_port.replace(' ','')
'''
def crawl_xroxy(self):
for i in ['CN', 'TW']:
start_url = 'http://www.xroxy.com/proxylist.php?country={}'.format(i)
html = get_page(start_url)
if html:
ip_address1 = re.compile("title='View this Proxy details'>\s*(.*).*")
re_ip_address1 = ip_address1.findall(html)
ip_address2 = re.compile("title='Select proxies with port number .*'>(.*)</a>")
re_ip_address2 = ip_address2.findall(html)
for address,port in zip(re_ip_address1,re_ip_address2):
address_port = address+':'+port
yield address_port.replace(' ','')
'''
def crawl_kuaidaili(self):
for i in range(1, 4):
start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)
html = get_page(start_url)
if html:
ip_address = re.compile('<td data-title="IP">(.*?)</td>')
re_ip_address = ip_address.findall(html)
port = re.compile('<td data-title="PORT">(.*?)</td>')
re_port = port.findall(html)
for address,port in zip(re_ip_address, re_port):
address_port = address+':'+port
yield address_port.replace(' ','')
def crawl_xicidaili(self):
for i in range(1, 3):
start_url = 'http://www.xicidaili.com/nn/{}'.format(i)
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Cookie': 'free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWRjYzc5MmM1MTBiMDMzYTUzNTZjNzA4NjBhNWRjZjliBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMUp6S2tXT3g5a0FCT01ndzlmWWZqRVJNek1WanRuUDBCbTJUN21GMTBKd3M9BjsARg%3D%3D--2a69429cb2115c6a0cc9a86e0ebe2800c0d471b3',
'Host': 'www.xicidaili.com',
'Referer': 'http://www.xicidaili.com/nn/3',
'Upgrade-Insecure-Requests': '1',
}
html = get_page(start_url, options=headers)
if html:
find_trs = re.compile('<tr class.*?>(.*?)</tr>', re.S)
trs = find_trs.findall(html)
for tr in trs:
find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
re_ip_address = find_ip.findall(tr)
find_port = re.compile('<td>(\d+)</td>')
re_port = find_port.findall(tr)
for address,port in zip(re_ip_address, re_port):
address_port = address+':' + port
yield address_port.replace(' ', '')
def crawl_ip3366(self):
for i in range(1, 4):
start_url_sy = 'http://www.ip3366.net/?stype=1&page={}'.format(i)
start_url_gn = 'http://www.ip3366.net/free/?stype=1&page={}'.format(i)
start_url_gw = 'http://www.ip3366.net/free/?stype=3&page={}'.format(i)
url_list = [start_url_sy, start_url_gn, start_url_gw]
for start_url in url_list:
print(start_url)
html = get_page(start_url)
if html:
find_tr = re.compile('<tr>(.*?)</tr>', re.S)
trs = find_tr.findall(html)
for s in range(1, len(trs)):
find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
re_ip_address = find_ip.findall(trs[s])
find_port = re.compile('<td>(\d+)</td>')
re_port = find_port.findall(trs[s])
for address,port in zip(re_ip_address, re_port):
address_port = address+':'+port
yield address_port.replace(' ','')
def crawl_iphai(self):
# start_url = 'http://www.iphai.com/'
        # domestic high-anonymity proxies
start_url_ng = 'http://www.iphai.com/free/ng'
        # overseas high-anonymity proxies
start_url_wg = 'http://www.iphai.com/free/wg'
urllist = [start_url_ng, start_url_wg]
for start_url in urllist:
print(start_url)
html = get_page(start_url)
if html:
find_tr = re.compile('<tr>(.*?)</tr>', re.S)
trs = find_tr.findall(html)
for s in range(1, len(trs)):
find_ip = re.compile('<td>\s+(\d+\.\d+\.\d+\.\d+)\s+</td>', re.S)
re_ip_address = find_ip.findall(trs[s])
find_port = re.compile('<td>\s+(\d+)\s+</td>', re.S)
re_port = find_port.findall(trs[s])
for address,port in zip(re_ip_address, re_port):
address_port = address+':'+port
yield address_port.replace(' ','')
def crawl_89ip(self):
        # homepage proxies
start_url_sy = 'http://www.ip3366.net/'
        # domestic high-anonymity proxies
start_url_gn = 'http://www.ip3366.net/free/?stype=1'
        # overseas high-anonymity proxies
start_url_gw = 'http://www.ip3366.net/free/?stype=3'
url_list = [start_url_sy, start_url_gn, start_url_gw]
for start_url in url_list:
print(start_url)
html = get_page(start_url)
if html:
find_ips = re.compile('(\d+\.\d+\.\d+\.\d+:\d+)', re.S)
ip_ports = find_ips.findall(html)
for address_port in ip_ports:
yield address_port
def crawl_data5u(self):
start_url_gn = 'http://www.data5u.com/free/gngn/index.shtml'
start_url_gw = 'http://www.data5u.com/free/gwgn/index.shtml'
url_list = [start_url_gn, start_url_gw]
for start_url in url_list:
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',
'Host': 'www.data5u.com',
'Referer': 'http://www.data5u.com/free/index.shtml',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
}
html = get_page(start_url, options=headers)
if html:
ip_address = re.compile('<span><li>(\d+\.\d+\.\d+\.\d+)</li>.*?<li class=\"port.*?>(\d+)</li>', re.S)
re_ip_address = ip_address.findall(html)
for address, port in re_ip_address:
result = address + ':' + port
yield result.replace(' ', '')
'''
|
the-stack_106_31608 | # pylint: disable=too-many-lines
import os
import errno
import functools
import hashlib
import operator
import posixpath
import warnings
from datetime import timedelta
from itertools import islice, chain
from jinja2 import Undefined, is_undefined
from jinja2.utils import LRUCache
from jinja2.exceptions import UndefinedError
from werkzeug.urls import url_join
from werkzeug.utils import cached_property
from lektor._compat import string_types, text_type, integer_types, \
iteritems, range_type
from lektor import metaformat
from lektor.utils import sort_normalize_string, cleanup_path, \
untrusted_to_os_path, fs_enc, locate_executable
from lektor.sourceobj import SourceObject, VirtualSourceObject
from lektor.context import get_ctx, Context
from lektor.datamodel import load_datamodels, load_flowblocks
from lektor.imagetools import (
ThumbnailMode, make_image_thumbnail,
read_exif, get_image_info,
)
from lektor.assets import Directory
from lektor.editor import make_editor_session
from lektor.environment import PRIMARY_ALT
from lektor.databags import Databags
from lektor.filecontents import FileContents
from lektor.utils import make_relative_url, split_virtual_path
from lektor.videotools import get_video_info, make_video_thumbnail
# pylint: disable=no-member
def get_alts(source=None, fallback=False):
"""Given a source this returns the list of all alts that the source
exists as. It does not include fallbacks unless `fallback` is passed.
If no source is provided all configured alts are returned. If alts are
not configured at all, the return value is an empty list.
"""
if source is None:
ctx = get_ctx()
if ctx is None:
raise RuntimeError('This function requires the context to be supplied.')
pad = ctx.pad
else:
pad = source.pad
alts = list(pad.config.iter_alternatives())
if alts == [PRIMARY_ALT]:
return []
rv = alts
# If a source is provided and it's not virtual, we look up all alts
# of the path on the pad to figure out which records exist.
if source is not None and '@' not in source.path:
rv = []
for alt in alts:
if pad.alt_exists(source.path, alt=alt,
fallback=fallback):
rv.append(alt)
return rv
def _process_slug(slug, last_segment=False):
if last_segment:
return slug
segments = slug.split('/')
if '.' not in segments[-1]:
return slug
if len(segments) == 1:
return '_' + segments[0]
return segments[0] + '/_' + segments[1]
def _require_ctx(record):
ctx = get_ctx()
if ctx is None:
raise RuntimeError('This operation requires a context but none was '
'on the stack.')
if ctx.pad is not record.pad:
raise RuntimeError('The context on the stack does not match the '
'pad of the record.')
return ctx
def _is_content_file(filename, alt=PRIMARY_ALT):
if filename == 'contents.lr':
return True
if alt != PRIMARY_ALT and filename == 'contents+%s.lr' % alt:
return True
return False
class _CmpHelper(object):
def __init__(self, value, reverse):
self.value = value
self.reverse = reverse
@staticmethod
def coerce(a, b):
if isinstance(a, string_types) and isinstance(b, string_types):
return sort_normalize_string(a), sort_normalize_string(b)
if type(a) is type(b):
return a, b
if isinstance(a, Undefined) or isinstance(b, Undefined):
if isinstance(a, Undefined):
a = None
if isinstance(b, Undefined):
b = None
return a, b
if isinstance(a, integer_types) or isinstance(a, float):
try:
return a, type(a)(b)
except (ValueError, TypeError, OverflowError):
pass
if isinstance(b, integer_types) or isinstance(b, float):
try:
return type(b)(a), b
except (ValueError, TypeError, OverflowError):
pass
return a, b
def __eq__(self, other):
a, b = self.coerce(self.value, other.value)
return a == b
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
a, b = self.coerce(self.value, other.value)
try:
if self.reverse:
return b < a
return a < b
except TypeError:
# Put None at the beginning if reversed, else at the end.
if self.reverse:
return a is not None
return a is None
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return not self.__lt__(other)
def _auto_wrap_expr(value):
if isinstance(value, Expression):
return value
return _Literal(value)
def save_eval(filter, record):
try:
return filter.__eval__(record)
except UndefinedError as e:
return Undefined(e.message)
class Expression(object):
def __eval__(self, record):
return record
def __eq__(self, other):
return _BinExpr(self, _auto_wrap_expr(other), operator.eq)
def __ne__(self, other):
return _BinExpr(self, _auto_wrap_expr(other), operator.ne)
def __and__(self, other):
return _BinExpr(self, _auto_wrap_expr(other), operator.and_)
def __or__(self, other):
return _BinExpr(self, _auto_wrap_expr(other), operator.or_)
def __gt__(self, other):
return _BinExpr(self, _auto_wrap_expr(other), operator.gt)
def __ge__(self, other):
return _BinExpr(self, _auto_wrap_expr(other), operator.ge)
def __lt__(self, other):
return _BinExpr(self, _auto_wrap_expr(other), operator.lt)
def __le__(self, other):
return _BinExpr(self, _auto_wrap_expr(other), operator.le)
def contains(self, item):
return _ContainmentExpr(self, _auto_wrap_expr(item))
def startswith(self, other):
return _BinExpr(self, _auto_wrap_expr(other),
lambda a, b: text_type(a).lower().startswith(text_type(b).lower()))
def endswith(self, other):
return _BinExpr(self, _auto_wrap_expr(other),
lambda a, b: text_type(a).lower().endswith(text_type(b).lower()))
def startswith_cs(self, other):
return _BinExpr(self, _auto_wrap_expr(other),
lambda a, b: text_type(a).startswith(text_type(b)))
def endswith_cs(self, other):
return _BinExpr(self, _auto_wrap_expr(other),
lambda a, b: text_type(a).endswith(text_type(b)))
def false(self):
return _IsBoolExpr(self, False)
def true(self):
return _IsBoolExpr(self, True)
# Query helpers for the template engine
setattr(Expression, 'and', lambda x, o: x & o)
setattr(Expression, 'or', lambda x, o: x | o)
class _CallbackExpr(Expression):
def __init__(self, func):
self.func = func
def __eval__(self, record):
return self.func(record)
class _IsBoolExpr(Expression):
def __init__(self, expr, true):
self.__expr = expr
self.__true = true
def __eval__(self, record):
val = self.__expr.__eval__(record)
return (not is_undefined(val) and
val not in (None, 0, False, '')) == self.__true
class _Literal(Expression):
def __init__(self, value):
self.__value = value
def __eval__(self, record):
return self.__value
class _BinExpr(Expression):
def __init__(self, left, right, op):
self.__left = left
self.__right = right
self.__op = op
def __eval__(self, record):
return self.__op(
self.__left.__eval__(record),
self.__right.__eval__(record)
)
class _ContainmentExpr(Expression):
def __init__(self, seq, item):
self.__seq = seq
self.__item = item
def __eval__(self, record):
seq = self.__seq.__eval__(record)
item = self.__item.__eval__(record)
if isinstance(item, Record):
item = item['_id']
return item in seq
class _RecordQueryField(Expression):
def __init__(self, field):
self.__field = field
def __eval__(self, record):
try:
return record[self.__field]
except KeyError:
return Undefined(obj=record, name=self.__field)
class _RecordQueryProxy(object):
def __getattr__(self, name):
if name[:2] != '__':
return _RecordQueryField(name)
raise AttributeError(name)
def __getitem__(self, name):
try:
return self.__getattr__(name)
except AttributeError:
raise KeyError(name)
F = _RecordQueryProxy()
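# Attribute access on F builds a _RecordQueryField expression that is
# evaluated per record, e.g. (hypothetical data, and assuming the Query
# object with a filter() method defined further down in this module):
#
#   pad.query('/blog').filter(F.status == 'published')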
class Record(SourceObject):
source_classification = 'record'
supports_pagination = False
def __init__(self, pad, data, page_num=None):
SourceObject.__init__(self, pad)
self._data = data
self._bound_data = {}
if page_num is not None and not self.supports_pagination:
raise RuntimeError('%s does not support pagination' %
self.__class__.__name__)
self.page_num = page_num
@property
def record(self):
return self
@property
def datamodel(self):
"""Returns the data model for this record."""
try:
return self.pad.db.datamodels[self._data['_model']]
except LookupError:
# If we cannot find the model we fall back to the default one.
return self.pad.db.default_model
@property
def alt(self):
"""Returns the alt of this source object."""
return self['_alt']
@property
def is_hidden(self):
"""Indicates if a record is hidden. A record is considered hidden
if the record itself is hidden or the parent is.
"""
if not is_undefined(self._data['_hidden']):
return self._data['_hidden']
return self._is_considered_hidden()
def _is_considered_hidden(self):
parent = self.parent
if parent is None:
return False
hidden_children = parent.datamodel.child_config.hidden
if hidden_children is not None:
return hidden_children
return parent.is_hidden
@property
def is_discoverable(self):
"""Indicates if the page is discoverable without knowing the URL."""
return self._data['_discoverable'] and not self.is_hidden
@cached_property
def pagination(self):
"""Returns the pagination controller for the record."""
if not self.supports_pagination:
raise AttributeError()
return self.datamodel.pagination_config.get_pagination_controller(self)
@cached_property
def contents(self):
return FileContents(self.source_filename)
def get_fallback_record_label(self, lang):
if not self['_id']:
return '(Index)'
return self['_id'].replace('-', ' ').replace('_', ' ').title()
def get_record_label_i18n(self):
rv = {}
for lang, _ in iteritems((self.datamodel.label_i18n or {})):
label = self.datamodel.format_record_label(self, lang)
if not label:
label = self.get_fallback_record_label(lang)
rv[lang] = label
# Fill in english if missing
if not rv:
rv['en'] = self.get_fallback_record_label('en')
return rv
@property
def record_label(self):
return (self.get_record_label_i18n() or {}).get('en')
@property
def url_path(self):
"""The target path where the record should end up."""
prefix, suffix = self.pad.db.config.get_alternative_url_span(
self.alt)
bits = []
node = self
while node is not None:
bits.append(_process_slug(node['_slug'], node is self))
node = node.parent
bits.reverse()
clean_path = '/'.join(bits).strip('/')
if prefix:
clean_path = prefix + clean_path
if suffix:
# XXX: 404.html with suffix -de becomes 404.html-de but should
# actually become 404-de.html
clean_path += suffix
return '/' + clean_path.strip('/')
@property
def path(self):
return self['_path']
def get_sort_key(self, fields):
"""Returns a sort key for the given field specifications specific
for the data in the record.
"""
rv = [None] * len(fields)
for idx, field in enumerate(fields):
if field[:1] == '-':
field = field[1:]
reverse = True
else:
field = field.lstrip('+')
reverse = False
rv[idx] = _CmpHelper(self._data.get(field), reverse)
return rv
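    # Descriptive example: for fields like ('-pub_date', 'title') the leading
    # '-' flips the comparison for that key, so records sort by pub_date
    # descending and then by title ascending. 'pub_date' and 'title' are
    # hypothetical field names used only for illustration.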
def __contains__(self, name):
return name in self._data and not is_undefined(self._data[name])
def __getitem__(self, name):
rv = self._bound_data.get(name, Ellipsis)
if rv is not Ellipsis:
return rv
rv = self._data[name]
if hasattr(rv, '__get__'):
rv = rv.__get__(self)
self._bound_data[name] = rv
return rv
def __eq__(self, other):
if self is other:
return True
if self.__class__ != other.__class__:
return False
return self['_path'] == other['_path']
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.path)
def __repr__(self):
return '<%s model=%r path=%r%s%s>' % (
self.__class__.__name__,
self['_model'],
self['_path'],
self.alt != PRIMARY_ALT and ' alt=%r' % self.alt or '',
self.page_num is not None and ' page_num=%r' % self.page_num or '',
)
class Siblings(VirtualSourceObject): # pylint: disable=abstract-method
def __init__(self, record, prev_page, next_page):
"""Virtual source representing previous and next sibling of 'record'."""
VirtualSourceObject.__init__(self, record)
self._path = record.path + '@siblings'
self._prev_page = prev_page
self._next_page = next_page
@property
def path(self):
# Used as a key in Context.referenced_virtual_dependencies.
return self._path
@property
def prev_page(self):
return self._prev_page
@property
def next_page(self):
return self._next_page
def iter_source_filenames(self):
for page in self._prev_page, self._next_page:
if page:
yield page.source_filename
def _file_infos(self, path_cache):
for page in self._prev_page, self._next_page:
if page:
yield path_cache.get_file_info(page.source_filename)
def get_mtime(self, path_cache):
mtimes = [i.mtime for i in self._file_infos(path_cache)]
return max(mtimes) if mtimes else None
def get_checksum(self, path_cache):
sums = '|'.join(i.filename_and_checksum
for i in self._file_infos(path_cache))
return sums or None
def siblings_resolver(node, url_path):
return node.get_siblings()
class Page(Record):
"""This represents a loaded record."""
is_attachment = False
supports_pagination = True
@cached_property
def path(self):
rv = self['_path']
if self.page_num is not None:
rv = '%s@%s' % (rv, self.page_num)
return rv
@cached_property
def record(self):
if self.page_num is None:
return self
return self.pad.get(self['_path'],
persist=self.pad.cache.is_persistent(self),
alt=self.alt)
@property
def source_filename(self):
if self.alt != PRIMARY_ALT:
return os.path.join(self.pad.db.to_fs_path(self['_path']),
'contents+%s.lr' % self.alt)
return os.path.join(self.pad.db.to_fs_path(self['_path']),
'contents.lr')
def iter_source_filenames(self):
yield self.source_filename
if self.alt != PRIMARY_ALT:
yield os.path.join(self.pad.db.to_fs_path(self['_path']),
'contents.lr')
@property
def url_path(self):
# pylint: disable=no-value-for-parameter
rv = Record.url_path.__get__(self).rstrip('/')
last_part = rv.rsplit('/')[-1]
if '.' not in last_part:
rv += '/'
if self.page_num in (1, None):
return rv
if '.' in last_part:
            raise RuntimeError('When a file extension is provided, pagination '
                               'cannot be used.')
return '%s%s/%d/' % (
rv,
self.datamodel.pagination_config.url_suffix.strip('/'),
self.page_num,
)
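    # Illustration (assuming the pagination url_suffix is 'page'): a page at
    # '/blog' with page_num=2 resolves to '/blog/page/2/', while page_num in
    # (1, None) keeps the plain '/blog/' path.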
def resolve_url_path(self, url_path):
pg = self.datamodel.pagination_config
# If we hit the end of the url path, then we found our target.
# However if pagination is enabled we want to resolve the first
# page instead of the unpaginated version.
if not url_path:
if pg.enabled and self.page_num is None:
return pg.get_record_for_page(self, 1)
return self
# Try to resolve the correctly paginated version here.
elif pg.enabled:
rv = pg.match_pagination(self, url_path)
if rv is not None:
return rv
# When we resolve URLs we also want to be able to explicitly
# target undiscoverable pages. Those who know the URL are
# rewarded.
q = self.children.include_undiscoverable(True)
for idx in range_type(len(url_path)):
piece = '/'.join(url_path[:idx + 1])
child = q.filter(F._slug == piece).first()
if child is None:
attachment = self.attachments.filter(F._slug == piece).first()
if attachment is None:
obj = self.pad.db.env.resolve_custom_url_path(
self, url_path)
if obj is None:
continue
node = obj
else:
node = attachment
else:
node = child
rv = node.resolve_url_path(url_path[idx + 1:])
if rv is not None:
return rv
return None
@cached_property
def parent(self):
"""The parent of the record."""
this_path = self._data['_path']
parent_path = posixpath.dirname(this_path)
if parent_path != this_path:
return self.pad.get(parent_path,
persist=self.pad.cache.is_persistent(self),
alt=self.alt)
return None
@property
def children(self):
"""A query over all children that are not hidden or undiscoverable.
want undiscoverable then use ``children.include_undiscoverable(True)``.
"""
repl_query = self.datamodel.get_child_replacements(self)
if repl_query is not None:
return repl_query.include_undiscoverable(False)
return Query(path=self['_path'], pad=self.pad, alt=self.alt)
@property
def attachments(self):
"""Returns a query for the attachments of this record."""
return AttachmentsQuery(path=self['_path'], pad=self.pad,
alt=self.alt)
def has_prev(self):
return self.get_siblings().prev_page is not None
def has_next(self):
return self.get_siblings().next_page is not None
def get_siblings(self):
"""The next and previous children of this page's parent.
Uses parent's pagination query, if any, else parent's "children" config.
"""
siblings = Siblings(self, *self._siblings)
ctx = get_ctx()
if ctx:
ctx.pad.db.track_record_dependency(siblings)
return siblings
@cached_property
def _siblings(self):
parent = self.parent
pagination_enabled = parent.datamodel.pagination_config.enabled
# Don't track dependencies for this part.
with Context(pad=self.pad):
if pagination_enabled:
pagination = parent.pagination
siblings = list(pagination.config.get_pagination_query(parent))
else:
siblings = list(parent.children)
prev_item, next_item = None, None
try:
me = siblings.index(self)
except ValueError:
# Self not in parents.children or not in parents.pagination.
pass
else:
if me > 0:
prev_item = siblings[me - 1]
if me + 1 < len(siblings):
next_item = siblings[me + 1]
return prev_item, next_item
class Attachment(Record):
"""This represents a loaded attachment."""
is_attachment = True
@property
def source_filename(self):
if self.alt != PRIMARY_ALT:
suffix = '+%s.lr' % self.alt
else:
suffix = '.lr'
return self.pad.db.to_fs_path(self['_path']) + suffix
def _is_considered_hidden(self):
# Attachments are only considered hidden if they have been
# configured as such. This means that even if a record itself is
# hidden, the attachments by default will not.
parent = self.parent
if parent is None:
return False
return parent.datamodel.attachment_config.hidden
@property
def record(self):
return self
@property
def attachment_filename(self):
return self.pad.db.to_fs_path(self['_path'])
@property
def parent(self):
"""The associated record for this attachment."""
return self.pad.get(self._data['_attachment_for'],
persist=self.pad.cache.is_persistent(self))
@cached_property
def contents(self):
return FileContents(self.attachment_filename)
def get_fallback_record_label(self, lang):
return self['_id']
def iter_source_filenames(self):
yield self.source_filename
yield self.attachment_filename
class Image(Attachment):
"""Specific class for image attachments."""
def __init__(self, pad, data, page_num=None):
Attachment.__init__(self, pad, data, page_num)
self._image_info = None
self._exif_cache = None
def _get_image_info(self):
if self._image_info is None:
with open(self.attachment_filename, 'rb') as f:
self._image_info = get_image_info(f)
return self._image_info
@property
def exif(self):
"""Provides access to the exif data."""
if self._exif_cache is None:
with open(self.attachment_filename, 'rb') as f:
self._exif_cache = read_exif(f)
return self._exif_cache
@property
def width(self):
"""The width of the image if possible to determine."""
rv = self._get_image_info()[1]
if rv is not None:
return rv
return Undefined('Width of image could not be determined.')
@property
def height(self):
"""The height of the image if possible to determine."""
rv = self._get_image_info()[2]
if rv is not None:
return rv
return Undefined('Height of image could not be determined.')
@property
def format(self):
"""Returns the format of the image."""
rv = self._get_image_info()[0]
if rv is not None:
return rv
return Undefined('The format of the image could not be determined.')
def thumbnail(self,
width=None, height=None, crop=None, mode=None,
upscale=None, quality=None):
"""Utility to create thumbnails."""
# `crop` exists to preserve backward-compatibility, and will be removed.
if crop is not None and mode is not None:
raise ValueError('Arguments `crop` and `mode` are mutually exclusive.')
if crop is not None:
warnings.warn(
'The `crop` argument is deprecated. Use `mode="crop"` instead.'
)
mode = "crop"
if mode is None:
mode = ThumbnailMode.DEFAULT
else:
mode = ThumbnailMode.from_label(mode)
if width is not None:
width = int(width)
if height is not None:
height = int(height)
return make_image_thumbnail(_require_ctx(self),
self.attachment_filename, self.url_path,
width=width, height=height, mode=mode,
upscale=upscale, quality=quality)
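    # Hypothetical template usage (not part of this module): something like
    #     {{ image.thumbnail(320) }} or {{ image.thumbnail(320, 240, mode='crop') }}
    # ends up here; the actual resizing is delegated to make_image_thumbnail.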
def require_ffmpeg(f):
"""Decorator to help with error messages for ffmpeg template functions."""
# If both ffmpeg and ffprobe executables are available we don't need to
# override the function
if locate_executable('ffmpeg') and locate_executable('ffprobe'):
return f
@functools.wraps(f)
def wrapper(*args, **kwargs):
return Undefined('Unable to locate ffmpeg or ffprobe executable. Is '
'it installed?')
return wrapper
class Video(Attachment):
"""Specific class for video attachments."""
def __init__(self, pad, data, page_num=None):
Attachment.__init__(self, pad, data, page_num)
self._video_info = None
def _get_video_info(self):
if self._video_info is None:
try:
self._video_info = get_video_info(self.attachment_filename)
except RuntimeError:
# A falsy value ensures we don't retry this video again
self._video_info = False
return self._video_info
@property
@require_ffmpeg
def width(self):
"""Returns the width of the video if possible to determine."""
rv = self._get_video_info()
if rv:
return rv['width']
return Undefined('The width of the video could not be determined.')
@property
@require_ffmpeg
def height(self):
"""Returns the height of the video if possible to determine."""
rv = self._get_video_info()
if rv:
return rv['height']
return Undefined('The height of the video could not be determined.')
@property
@require_ffmpeg
def duration(self):
"""Returns the duration of the video if possible to determine."""
rv = self._get_video_info()
if rv:
return rv['duration']
return Undefined('The duration of the video could not be determined.')
@require_ffmpeg
def frame(self, seek=None):
"""Returns a VideoFrame object that is thumbnailable like an Image."""
rv = self._get_video_info()
if not rv:
return Undefined('Unable to get video properties.')
if seek is None:
seek = rv["duration"] / 2
return VideoFrame(self, seek)
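    # Illustrative usage (hypothetical values): video.frame(5).thumbnail(300)
    # grabs the frame five seconds in; frame() without an argument seeks to
    # the midpoint of the video as computed above.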
class VideoFrame(object):
"""Representation of a specific frame in a VideoAttachment.
This is currently only useful for thumbnails, but in the future it might
work like an ImageAttachment.
"""
def __init__(self, video, seek):
self.video = video
if not isinstance(seek, timedelta):
seek = timedelta(seconds=seek)
if seek < timedelta(0):
raise ValueError("Seek distance must not be negative")
if video.duration and seek > video.duration:
raise ValueError(
"Seek distance must not be outside the video duration")
self.seek = seek
def __str__(self):
raise NotImplementedError('It is currently not possible to use video '
'frames directly, use .thumbnail().')
__unicode__ = __str__
@require_ffmpeg
def thumbnail(self, width=None, height=None, mode=None, upscale=None,
quality=None):
"""Utility to create thumbnails."""
if mode is None:
mode = ThumbnailMode.DEFAULT
else:
mode = ThumbnailMode.from_label(mode)
video = self.video
return make_video_thumbnail(
_require_ctx(video), video.attachment_filename, video.url_path,
seek=self.seek, width=width, height=height, mode=mode,
upscale=upscale, quality=quality)
attachment_classes = {
'image': Image,
'video': Video,
}
class Query(object):
"""Object that helps finding records. The default configuration
only finds pages.
"""
def __init__(self, path, pad, alt=PRIMARY_ALT):
self.path = path
self.pad = pad
self.alt = alt
self._include_pages = True
self._include_attachments = False
self._order_by = None
self._filters = None
self._pristine = True
self._limit = None
self._offset = None
self._include_hidden = None
self._include_undiscoverable = False
self._page_num = None
self._filter_func = None
def __get_lektor_param_hash__(self, h):
h.update(str(self.alt))
h.update(str(self._include_pages))
h.update(str(self._include_attachments))
h.update('(%s)' % u'|'.join(self._order_by or ()).encode('utf-8'))
h.update(str(self._limit))
h.update(str(self._offset))
h.update(str(self._include_hidden))
h.update(str(self._include_undiscoverable))
h.update(str(self._page_num))
@property
def self(self):
"""Returns the object this query starts out from."""
return self.pad.get(self.path, alt=self.alt)
def _clone(self, mark_dirty=False):
"""Makes a flat copy but keeps the other data on it shared."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
if mark_dirty:
rv._pristine = False
return rv
def _get(self, id, persist=True, page_num=Ellipsis):
"""Low level record access."""
if page_num is Ellipsis:
page_num = self._page_num
return self.pad.get('%s/%s' % (self.path, id), persist=persist,
alt=self.alt, page_num=page_num)
def _matches(self, record):
include_hidden = self._include_hidden
if include_hidden is not None:
if not self._include_hidden and record.is_hidden:
return False
if not self._include_undiscoverable and record.is_undiscoverable:
return False
for filter in self._filters or ():
if not save_eval(filter, record):
return False
return True
def _iterate(self):
"""Low level record iteration."""
        # If we iterate over children we also need to track those
        # dependencies. There are two ways in which we track them. The
        # first is through the start record of the query, if it can be
        # loaded; the second, which also covers the case where that record
        # does not exist, is recording the path itself below.
self_record = self.pad.get(self.path, alt=self.alt)
if self_record is not None:
self.pad.db.track_record_dependency(self_record)
# We also always want to record the path itself as dependency.
ctx = get_ctx()
if ctx is not None:
ctx.record_dependency(self.pad.db.to_fs_path(self.path))
for name, _, is_attachment in self.pad.db.iter_items(
self.path, alt=self.alt):
if not ((is_attachment == self._include_attachments) or
(not is_attachment == self._include_pages)):
continue
record = self._get(name, persist=False)
if self._matches(record):
yield record
def filter(self, expr):
"""Filters records by an expression."""
rv = self._clone(mark_dirty=True)
rv._filters = list(self._filters or ())
if callable(expr):
expr = _CallbackExpr(expr)
rv._filters.append(expr)
return rv
def get_order_by(self):
"""Returns the order that should be used."""
if self._order_by is not None:
return self._order_by
base_record = self.pad.get(self.path)
if base_record is not None:
if self._include_attachments and not self._include_pages:
return base_record.datamodel.attachment_config.order_by
elif self._include_pages and not self._include_attachments:
return base_record.datamodel.child_config.order_by
            # Otherwise the query includes either both attachments and
            # children or neither, and it is unclear which value of
            # order_by to use. We could punt and return
            # child_config.order_by, but for now, just return None.
return None
return None
def include_hidden(self, value):
"""Controls if hidden records should be included which will not
happen by default for queries to children.
"""
rv = self._clone(mark_dirty=True)
rv._include_hidden = value
return rv
def include_undiscoverable(self, value):
"""Controls if undiscoverable records should be included as well."""
rv = self._clone(mark_dirty=True)
rv._include_undiscoverable = value
        # If we flip from excluding undiscoverable records to including
        # them, but have not yet decided on the value of _include_hidden,
        # it becomes False so hidden records stay excluded.
if rv._include_hidden is None and value:
rv._include_hidden = False
return rv
def request_page(self, page_num):
"""Requests a specific page number instead of the first."""
rv = self._clone(mark_dirty=True)
rv._page_num = page_num
return rv
def first(self):
"""Loads all matching records as list."""
return next(iter(self), None)
def all(self):
"""Loads all matching records as list."""
return list(self)
def order_by(self, *fields):
"""Sets the ordering of the query."""
rv = self._clone()
rv._order_by = fields or None
return rv
def offset(self, offset):
"""Sets the ordering of the query."""
rv = self._clone(mark_dirty=True)
rv._offset = offset
return rv
def limit(self, limit):
"""Sets the ordering of the query."""
rv = self._clone(mark_dirty=True)
rv._limit = limit
return rv
def count(self):
"""Counts all matched objects."""
rv = 0
for item in self:
rv += 1
return rv
def distinct(self, fieldname):
"""Set of unique values for the given field."""
rv = set()
for item in self:
if fieldname in item._data:
value = item._data[fieldname]
if isinstance(value, (list, tuple)):
rv |= set(value)
elif not isinstance(value, Undefined):
rv.add(value)
return rv
def get(self, id, page_num=Ellipsis):
"""Gets something by the local path."""
# If we're not pristine, we need to query here
if not self._pristine:
q = self.filter(F._id == id)
if page_num is not Ellipsis:
q = q.request_page(page_num)
return q.first()
# otherwise we can load it directly.
return self._get(id, page_num=page_num)
def __bool__(self):
return self.first() is not None
__nonzero__ = __bool__
def __iter__(self):
"""Iterates over all records matched."""
iterable = self._iterate()
order_by = self.get_order_by()
if order_by:
iterable = sorted(
iterable, key=lambda x: x.get_sort_key(order_by))
if self._offset is not None or self._limit is not None:
iterable = islice(iterable, self._offset or 0,
(self._offset or 0) + self._limit)
for item in iterable:
yield item
def __repr__(self):
return '<%s %r%s>' % (
self.__class__.__name__,
self.path,
self.alt and ' alt=%r' % self.alt or '',
)
class EmptyQuery(Query):
def _get(self, id, persist=True, page_num=Ellipsis):
pass
def _iterate(self):
"""Low level record iteration."""
return iter(())
class AttachmentsQuery(Query):
"""Specialized query class that only finds attachments."""
def __init__(self, path, pad, alt=PRIMARY_ALT):
Query.__init__(self, path, pad, alt=PRIMARY_ALT)
self._include_pages = False
self._include_attachments = True
@property
def images(self):
"""Filters to images."""
return self.filter(F._attachment_type == 'image')
@property
def videos(self):
"""Filters to videos."""
return self.filter(F._attachment_type == 'video')
@property
def audio(self):
"""Filters to audio."""
return self.filter(F._attachment_type == 'audio')
@property
def documents(self):
"""Filters to documents."""
return self.filter(F._attachment_type == 'document')
@property
def text(self):
"""Filters to plain text data."""
return self.filter(F._attachment_type == 'text')
def _iter_filename_choices(fn_base, alts, config, fallback=True):
"""Returns an iterator over all possible filename choices to .lr files
below a base filename that matches any of the given alts.
"""
# the order here is important as attachments can exist without a .lr
# file and as such need to come second or the loading of raw data will
# implicitly say the record exists.
for alt in alts:
if alt != PRIMARY_ALT and config.is_valid_alternative(alt):
yield os.path.join(fn_base, 'contents+%s.lr' % alt), alt, False
if fallback or PRIMARY_ALT in alts:
yield os.path.join(fn_base, 'contents.lr'), PRIMARY_ALT, False
for alt in alts:
if alt != PRIMARY_ALT and config.is_valid_alternative(alt):
yield fn_base + '+%s.lr' % alt, alt, True
if fallback or PRIMARY_ALT in alts:
yield fn_base + '.lr', PRIMARY_ALT, True
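# Example of the yielded order (hypothetical paths, assuming 'de' is a
# configured alternative): for fn_base '/content/blog/post' and alts ['de']
# this yields
#     /content/blog/post/contents+de.lr   (page, alt 'de')
#     /content/blog/post/contents.lr      (page, primary alt)
#     /content/blog/post+de.lr            (attachment, alt 'de')
#     /content/blog/post.lr               (attachment, primary alt)
# i.e. page candidates first, attachment candidates second, as explained above.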
def _iter_content_files(dir_path, alts):
"""Returns an iterator over all existing content files below the given
directory. This yields specific files for alts before it falls back
to the primary alt.
"""
for alt in alts:
if alt == PRIMARY_ALT:
continue
if os.path.isfile(os.path.join(dir_path, 'contents+%s.lr' % alt)):
yield alt
if os.path.isfile(os.path.join(dir_path, 'contents.lr')):
yield PRIMARY_ALT
def _iter_datamodel_choices(datamodel_name, path, is_attachment=False):
yield datamodel_name
if not is_attachment:
yield posixpath.basename(path).split('.')[0].replace('-', '_').lower()
yield 'page'
yield 'none'
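# Fallback chain sketch (path is hypothetical): for '/projects/my-site.html'
# with no explicit model this yields the implied model name (if any), then
# 'my_site', then 'page', then 'none'; the first name found in the datamodel
# registry wins (see Database.get_implied_datamodel below).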
class Database(object):
def __init__(self, env, config=None):
self.env = env
if config is None:
config = env.load_config()
self.config = config
self.datamodels = load_datamodels(env)
self.flowblocks = load_flowblocks(env)
def to_fs_path(self, path):
"""Convenience function to convert a path into an file system path."""
return os.path.join(self.env.root_path, 'content',
untrusted_to_os_path(path))
def load_raw_data(self, path, alt=PRIMARY_ALT, cls=None,
fallback=True):
"""Internal helper that loads the raw record data. This performs
very little data processing on the data.
"""
path = cleanup_path(path)
if cls is None:
cls = dict
fn_base = self.to_fs_path(path)
rv = cls()
rv_type = None
choiceiter = _iter_filename_choices(fn_base, [alt], self.config,
fallback=fallback)
for fs_path, source_alt, is_attachment in choiceiter:
# If we already determined what our return value is but the
# type mismatches what we try now, we have to abort. Eg:
# a page can not become an attachment or the other way round.
if rv_type is not None and rv_type != is_attachment:
break
try:
with open(fs_path, 'rb') as f:
if rv_type is None:
rv_type = is_attachment
for key, lines in metaformat.tokenize(f, encoding='utf-8'):
if key not in rv:
rv[key] = u''.join(lines)
except IOError as e:
if e.errno not in (errno.ENOTDIR, errno.ENOENT):
raise
if not is_attachment or not os.path.isfile(fs_path[:-3]):
continue
# Special case: we are loading an attachment but the meta
# data file does not exist. In that case we still want to
# record that we're loading an attachment.
elif is_attachment:
rv_type = True
if '_source_alt' not in rv:
rv['_source_alt'] = source_alt
if rv_type is None:
return None
rv['_path'] = path
rv['_id'] = posixpath.basename(path)
rv['_gid'] = hashlib.md5(path.encode('utf-8')).hexdigest()
rv['_alt'] = alt
if rv_type:
rv['_attachment_for'] = posixpath.dirname(path)
return rv
def iter_items(self, path, alt=PRIMARY_ALT):
"""Iterates over all items below a path and yields them as
tuples in the form ``(id, alt, is_attachment)``.
"""
fn_base = self.to_fs_path(path)
if alt is None:
alts = self.config.list_alternatives()
single_alt = False
else:
alts = [alt]
single_alt = True
choiceiter = _iter_filename_choices(fn_base, alts, self.config)
for fs_path, actual_alt, is_attachment in choiceiter:
if not os.path.isfile(fs_path):
continue
# This path is actually for an attachment, which means that we
# cannot have any items below it and will just abort with an
# empty iterator.
if is_attachment:
break
try:
dir_path = os.path.dirname(fs_path)
for filename in os.listdir(dir_path):
if not isinstance(filename, text_type):
try:
filename = filename.decode(fs_enc)
except UnicodeError:
continue
if filename.endswith('.lr') or \
self.env.is_uninteresting_source_name(filename):
continue
# We found an attachment. Attachments always live
# below the primary alt, so we report it as such.
if os.path.isfile(os.path.join(dir_path, filename)):
yield filename, PRIMARY_ALT, True
# We found a directory, let's make sure it contains a
# contents.lr file (or a contents+alt.lr file).
else:
for content_alt in _iter_content_files(
os.path.join(dir_path, filename), alts):
yield filename, content_alt, False
# If we want a single alt, we break here so
# that we only produce a single result.
# Otherwise this would also return the primary
# fallback here.
if single_alt:
break
except IOError as e:
if e.errno != errno.ENOENT:
raise
continue
# If we reach this point, we found our parent, so we can stop
# searching for more at this point.
break
def get_datamodel_for_raw_data(self, raw_data, pad=None):
"""Returns the datamodel that should be used for a specific raw
data. This might require the discovery of a parent object through
the pad.
"""
path = raw_data['_path']
is_attachment = bool(raw_data.get('_attachment_for'))
datamodel = (raw_data.get('_model') or '').strip() or None
return self.get_implied_datamodel(path, is_attachment, pad,
datamodel=datamodel)
def iter_dependent_models(self, datamodel):
seen = set()
def deep_find(datamodel):
seen.add(datamodel)
if datamodel.parent is not None and datamodel.parent not in seen:
deep_find(datamodel.parent)
for related_dm_name in (datamodel.child_config.model,
datamodel.attachment_config.model):
dm = self.datamodels.get(related_dm_name)
if dm is not None and dm not in seen:
deep_find(dm)
deep_find(datamodel)
seen.discard(datamodel)
return iter(seen)
def get_implied_datamodel(self, path, is_attachment=False, pad=None,
datamodel=None):
"""Looks up a datamodel based on the information about the parent
of a model.
"""
dm_name = datamodel
        # Only look for a datamodel if one was not explicitly defined.
if dm_name is None:
parent = posixpath.dirname(path)
dm_name = None
# If we hit the root, and there is no model defined we need
# to make sure we do not recurse onto ourselves.
if parent != path:
if pad is None:
pad = self.new_pad()
parent_obj = pad.get(parent)
if parent_obj is not None:
if is_attachment:
dm_name = parent_obj.datamodel.attachment_config.model
else:
dm_name = parent_obj.datamodel.child_config.model
for dm_name in _iter_datamodel_choices(dm_name, path, is_attachment):
# If that datamodel exists, let's roll with it.
datamodel = self.datamodels.get(dm_name)
if datamodel is not None:
return datamodel
raise AssertionError("Did not find an appropriate datamodel. "
"That should never happen.")
def get_attachment_type(self, path):
"""Gets the attachment type for a path."""
return self.config['ATTACHMENT_TYPES'].get(
posixpath.splitext(path)[1].lower())
def track_record_dependency(self, record):
ctx = get_ctx()
if ctx is not None:
for filename in record.iter_source_filenames():
ctx.record_dependency(filename)
for virtual_source in record.iter_virtual_sources():
ctx.record_virtual_dependency(virtual_source)
if getattr(record, 'datamodel', None) and record.datamodel.filename:
ctx.record_dependency(record.datamodel.filename)
for dep_model in self.iter_dependent_models(record.datamodel):
if dep_model.filename:
ctx.record_dependency(dep_model.filename)
return record
def get_default_slug(self, data, pad):
parent_path = posixpath.dirname(data['_path'])
parent = None
if parent_path != data['_path']:
parent = pad.get(parent_path)
if parent:
slug = parent.datamodel.get_default_child_slug(pad, data)
else:
slug = ''
return slug
def process_data(self, data, datamodel, pad):
# Automatically fill in slugs
if not data['_slug']:
data['_slug'] = self.get_default_slug(data, pad)
else:
data['_slug'] = data['_slug'].strip('/')
# For attachments figure out the default attachment type if it's
# not yet provided.
if is_undefined(data['_attachment_type']) and \
data['_attachment_for']:
data['_attachment_type'] = self.get_attachment_type(data['_path'])
# Automatically fill in templates
if is_undefined(data['_template']):
data['_template'] = datamodel.get_default_template_name()
def get_record_class(self, datamodel, raw_data):
"""Returns the appropriate record class for a datamodel and raw data."""
is_attachment = bool(raw_data.get('_attachment_for'))
if not is_attachment:
return Page
attachment_type = raw_data['_attachment_type']
return attachment_classes.get(attachment_type, Attachment)
def new_pad(self):
"""Creates a new pad object for this database."""
return Pad(self)
def _split_alt_from_url(config, clean_path):
primary = config.primary_alternative
# The alternative system is not configured, just return
if primary is None:
return None, clean_path
# First try to find alternatives that are identified by a prefix.
for prefix, alt in config.get_alternative_url_prefixes():
if clean_path.startswith(prefix):
return alt, clean_path[len(prefix):].strip('/')
# Special case which is the URL root.
elif prefix.strip('/') == clean_path:
return alt, ''
# Now find alternatives that are identified by a suffix.
for suffix, alt in config.get_alternative_url_suffixes():
if clean_path.endswith(suffix):
return alt, clean_path[:-len(suffix)].strip('/')
# If we have a primary alternative without a prefix and suffix, we can
# return that one.
if config.primary_alternative_is_rooted:
return None, clean_path
return None, None
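# Resolution sketch (hypothetical configuration): if the 'de' alternative is
# identified by a URL prefix, a clean path such as 'de/blog' resolves to
# ('de', 'blog'); with no prefix or suffix match and a rooted primary
# alternative the path comes back unchanged as (None, clean_path), and
# (None, None) signals that regular record handling should be skipped.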
class Pad(object):
def __init__(self, db):
self.db = db
self.cache = RecordCache(db.config['EPHEMERAL_RECORD_CACHE_SIZE'])
self.databags = Databags(db.env)
@property
def config(self):
"""The config for this pad."""
return self.db.config
@property
def env(self):
"""The env for this pad."""
return self.db.env
def make_absolute_url(self, url):
"""Given a URL this makes it absolute if this is possible."""
base_url = self.db.config['PROJECT'].get('url')
if base_url is None:
raise RuntimeError('To use absolute URLs you need to configure '
'the URL in the project config.')
return url_join(base_url.rstrip('/') + '/', url.lstrip('/'))
def make_url(self, url, base_url=None, absolute=None, external=None):
"""Helper method that creates a finalized URL based on the parameters
provided and the config.
"""
url_style = self.db.config.url_style
if absolute is None:
absolute = url_style == 'absolute'
if external is None:
external = url_style == 'external'
if external:
external_base_url = self.db.config.base_url
if external_base_url is None:
raise RuntimeError('To use absolute URLs you need to '
'configure the URL in the project config.')
return url_join(external_base_url, url.lstrip('/'))
if absolute:
return url_join(self.db.config.base_path, url.lstrip('/'))
if base_url is None:
raise RuntimeError('Cannot calculate a relative URL if no base '
'URL has been provided.')
return make_relative_url(base_url, url)
def resolve_url_path(self, url_path, include_invisible=False,
include_assets=True, alt_fallback=True):
"""Given a URL path this will find the correct record which also
might be an attachment. If a record cannot be found or is unexposed
the return value will be `None`.
"""
pieces = clean_path = cleanup_path(url_path).strip('/')
# Split off the alt and if no alt was found, point it to the
# primary alternative. If the clean path comes back as `None`
# then the config does not include a rooted alternative and we
# have to skip handling of regular records.
alt, clean_path = _split_alt_from_url(self.db.config, clean_path)
if clean_path is not None:
if not alt:
if alt_fallback:
alt = self.db.config.primary_alternative or PRIMARY_ALT
else:
alt = PRIMARY_ALT
node = self.get_root(alt=alt)
if node is None:
raise RuntimeError('Tree root could not be found.')
pieces = clean_path.split('/')
if pieces == ['']:
pieces = []
rv = node.resolve_url_path(pieces)
if rv is not None and (include_invisible or rv.is_visible):
return rv
if include_assets:
for asset_root in [self.asset_root] + self.theme_asset_roots:
rv = asset_root.resolve_url_path(pieces)
if rv is not None:
break
return rv
return None
def get_root(self, alt=PRIMARY_ALT):
"""The root page of the database."""
return self.get('/', alt=alt, persist=True)
root = property(get_root)
@property
def asset_root(self):
"""The root of the asset tree."""
return Directory(self, name='',
path=os.path.join(self.db.env.root_path, 'assets'))
@property
def theme_asset_roots(self):
"""The root of the asset tree of each theme."""
asset_roots = []
for theme_path in self.db.env.theme_paths:
asset_roots.append(Directory(self, name='',
path=os.path.join(theme_path, 'assets')))
return asset_roots
def get_all_roots(self):
"""Returns all the roots for building."""
rv = []
for alt in self.db.config.list_alternatives():
rv.append(self.get_root(alt=alt))
# If we don't have any alternatives, then we go with the implied
# root.
if not rv and self.root:
rv = [self.root]
rv.append(self.asset_root)
rv.extend(self.theme_asset_roots)
return rv
def get_virtual(self, record, virtual_path):
"""Resolves a virtual path below a record."""
pieces = virtual_path.strip('/').split('/')
if not pieces or pieces == ['']:
return record
if pieces[0].isdigit():
if len(pieces) == 1:
return self.get(record['_path'], alt=record.alt, page_num=int(pieces[0]))
return None
resolver = self.env.virtual_sources.get(pieces[0])
if resolver is None:
return None
return resolver(record, pieces[1:])
def get(self, path, alt=PRIMARY_ALT, page_num=None, persist=True,
allow_virtual=True):
"""Loads a record by path."""
virt_markers = path.count('@')
# If the virtual marker is included, we also want to look up the
# virtual path below an item. Special case: if virtual paths are
# not allowed but one was passed, we just return `None`.
if virt_markers == 1:
if page_num is not None:
raise RuntimeError('Cannot use both virtual paths and '
'explicit page number lookups. You '
                                   'need to use one or the other.')
if not allow_virtual:
return None
path, virtual_path = path.split('@', 1)
rv = self.get(path, alt=alt, page_num=page_num,
persist=persist)
if rv is None:
return None
return self.get_virtual(rv, virtual_path)
# Sanity check: there must only be one or things will get weird.
elif virt_markers > 1:
return None
path = cleanup_path(path)
virtual_path = None
if page_num is not None:
virtual_path = str(page_num)
rv = self.cache.get(path, alt, virtual_path)
if rv is not Ellipsis:
if rv is not None:
self.db.track_record_dependency(rv)
return rv
raw_data = self.db.load_raw_data(path, alt=alt)
if raw_data is None:
self.cache.remember_as_missing(path, alt, virtual_path)
return None
rv = self.instance_from_data(raw_data, page_num=page_num)
if persist:
self.cache.persist(rv)
else:
self.cache.remember(rv)
return self.db.track_record_dependency(rv)
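    # Illustrative lookups (paths are hypothetical): pad.get('/blog') loads a
    # record, pad.get('/blog@2') resolves page 2 through the virtual-path
    # branch above, and pad.get('/blog@siblings') dispatches to a registered
    # virtual source resolver.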
def alt_exists(self, path, alt=PRIMARY_ALT, fallback=False):
"""Checks if an alt exists."""
path = cleanup_path(path)
if '@' in path:
return False
# If we find the path in the cache, check if it was loaded from
# the right source alt.
rv = self.get(path, alt)
if rv is not None:
if rv['_source_alt'] == alt:
return True
elif (fallback or
(rv['_source_alt'] == PRIMARY_ALT and
alt == self.config.primary_alternative)):
return True
return False
return False
def get_asset(self, path):
"""Loads an asset by path."""
clean_path = cleanup_path(path).strip('/')
nodes = [self.asset_root] + self.theme_asset_roots
for node in nodes:
for piece in clean_path.split('/'):
node = node.get_child(piece)
if node is None:
break
if node is not None:
return node
return None
def instance_from_data(self, raw_data, datamodel=None, page_num=None):
"""This creates an instance from the given raw data."""
if datamodel is None:
datamodel = self.db.get_datamodel_for_raw_data(raw_data, self)
data = datamodel.process_raw_data(raw_data, self)
self.db.process_data(data, datamodel, self)
cls = self.db.get_record_class(datamodel, data)
return cls(self, data, page_num=page_num)
def query(self, path=None, alt=PRIMARY_ALT):
"""Queries the database either at root level or below a certain
path. This is the recommended way to interact with toplevel data.
The alternative is to work with the :attr:`root` document.
"""
# Don't accidentally pass `None` down to the query as this might
# do some unexpected things.
if alt is None:
alt = PRIMARY_ALT
return Query(path='/' + (path or '').strip('/'), pad=self, alt=alt) \
.include_hidden(True)
class TreeItem(object):
"""Represents a single tree item and all the alts within it."""
def __init__(self, tree, path, alts, exists=True,
is_attachment=False, attachment_type=None,
can_have_children=False, can_have_attachments=False,
can_be_deleted=False, is_visible=True,
label_i18n=None):
self.tree = tree
self.path = path
self.alts = alts
self.exists = exists
self.is_attachment = is_attachment
self.attachment_type = attachment_type
self.can_have_children = can_have_children
self.can_have_attachments = can_have_attachments
self.can_be_deleted = can_be_deleted
self.is_visible = is_visible
self.label_i18n = label_i18n
@property
def id(self):
"""The local ID of the item."""
return posixpath.basename(self.path)
def get_parent(self):
"""Returns the parent item."""
if self.path == '/':
return None
return self.tree.get(posixpath.dirname(self.path))
def get(self, path):
"""Returns a child within this item."""
if self.exists:
return self.tree.get(posixpath.join(self.path, path))
return None
def iter_children(self, include_attachments=True, include_pages=True):
"""Iterates over all children."""
if not self.exists:
return iter(())
return self.tree.iter_children(self.path, include_attachments,
include_pages)
def get_children(self, offset=0, limit=None, include_attachments=True,
include_pages=True):
"""Returns a list of all children."""
if not self.exists:
return []
return self.tree.get_children(self.path, offset, limit,
include_attachments, include_pages)
def __repr__(self):
return '<TreeItem %r%s>' % (
self.path,
self.is_attachment and ' attachment' or '',
)
class Alt(object):
def __init__(self, id, record):
self.id = id
self.record = record
self.exists = record is not None and \
os.path.isfile(record.source_filename)
def __repr__(self):
return '<Alt %r%s>' % (self.id, self.exists and '*' or '')
class Tree(object):
"""Special object that can be used to get a broader insight into the
database in a way that is not bound to the alt system directly.
This wraps a pad and provides additional ways to interact with the data
of the database in a way that is similar to how the data is actually laid
out on the file system and not as the data is represented. Primarily the
difference is how alts are handled. Where the pad resolves the alts
automatically to make the handling automatic, the tree will give access
    to the underlying alts directly.
"""
def __init__(self, pad):
self.pad = pad
def get(self, path=None, persist=True):
"""Returns a path item at the given node."""
path = '/' + (path or '').strip('/')
alts = {}
exists = False
first_record = None
label_i18n = None
for alt in chain([PRIMARY_ALT], self.pad.db.config.list_alternatives()):
record = self.pad.get(path, alt=alt, persist=persist,
allow_virtual=False)
if first_record is None:
first_record = record
if record is not None:
exists = True
alts[alt] = Alt(alt, record)
# These flags only really make sense if we found an existing
# record, otherwise we fall back to some sort of sane default.
# Note that in theory different alts can disagree on what
# datamodel they use but this is something that is really not
# supported. This cannot happen if you edit based on the admin
# panel and if you edit it manually and screw up the part, we
# cannot really do anything about it.
#
        # More importantly we generally assume that first_record is the
# primary alt. There are some situations in which case this is
# not true, for instance if no primary alt exists. In this case
# we just go with any record.
is_visible = True
is_attachment = False
attachment_type = None
can_have_children = False
can_have_attachments = False
can_be_deleted = exists and path != '/'
if first_record is not None:
is_attachment = first_record.is_attachment
is_visible = first_record.is_visible
dm = first_record.datamodel
if not is_attachment:
can_have_children = dm.has_own_children
can_have_attachments = dm.has_own_attachments
if dm.protected:
can_be_deleted = False
else:
attachment_type = first_record['_attachment_type'] or None
label_i18n = first_record.get_record_label_i18n()
return TreeItem(self, path, alts, exists, is_attachment=is_attachment,
attachment_type=attachment_type,
can_have_children=can_have_children,
can_have_attachments=can_have_attachments,
can_be_deleted=can_be_deleted,
is_visible=is_visible, label_i18n=label_i18n)
def _get_child_ids(self, path=None, include_attachments=True,
include_pages=True):
"""Returns a sorted list of just the IDs of children below a path."""
path = '/' + (path or '').strip('/')
names = set()
for name, _, is_attachment in self.pad.db.iter_items(
path, alt=None):
if (is_attachment and include_attachments) or \
(not is_attachment and include_pages):
names.add(name)
return sorted(names, key=lambda x: x.lower())
def iter_children(self, path=None, include_attachments=True,
include_pages=True):
"""Iterates over all children below a path."""
path = '/' + (path or '').strip('/')
for name in self._get_child_ids(path, include_attachments,
include_pages):
yield self.get(posixpath.join(path, name), persist=False)
def get_children(self, path=None, offset=0, limit=None,
include_attachments=True, include_pages=True):
"""Returns a slice of children."""
path = '/' + (path or '').strip('/')
end = None
if limit is not None:
end = offset + limit
return [self.get(posixpath.join(path, name), persist=False)
for name in self._get_child_ids(
path, include_attachments, include_pages)[offset:end]]
def edit(self, path, is_attachment=None, alt=PRIMARY_ALT, datamodel=None):
"""Edits a record by path."""
return make_editor_session(self.pad, cleanup_path(path), alt=alt,
is_attachment=is_attachment,
datamodel=datamodel)
class RecordCache(object):
"""The record cache holds records eitehr in an persistent or ephemeral
section which helps the pad not load records it already saw.
"""
def __init__(self, ephemeral_cache_size=1000):
self.persistent = {}
self.ephemeral = LRUCache(ephemeral_cache_size)
def _get_cache_key(self, record_or_path, alt=PRIMARY_ALT,
virtual_path=None):
if isinstance(record_or_path, string_types):
path = record_or_path.strip('/')
else:
path, virtual_path = split_virtual_path(record_or_path.path)
path = path.strip('/')
virtual_path = virtual_path or None
alt = record_or_path.alt
return (path, alt, virtual_path)
def flush(self):
"""Flushes the cache"""
self.persistent.clear()
self.ephemeral.clear()
def is_persistent(self, record):
"""Indicates if a record is in the persistent record cache."""
cache_key = self._get_cache_key(record)
return cache_key in self.persistent
def remember(self, record):
"""Remembers the record in the record cache."""
cache_key = self._get_cache_key(record)
if cache_key not in self.persistent and cache_key not in self.ephemeral:
self.ephemeral[cache_key] = record
def persist(self, record):
"""Persists a record. This will put it into the persistent cache."""
cache_key = self._get_cache_key(record)
self.persistent[cache_key] = record
try:
del self.ephemeral[cache_key]
except KeyError:
pass
def persist_if_cached(self, record):
"""If the record is already ephemerally cached, this promotes it to
the persistent cache section.
"""
cache_key = self._get_cache_key(record)
if cache_key in self.ephemeral:
self.persist(record)
def get(self, path, alt=PRIMARY_ALT, virtual_path=None):
"""Looks up a record from the cache."""
cache_key = self._get_cache_key(path, alt, virtual_path)
rv = self.persistent.get(cache_key, Ellipsis)
if rv is not Ellipsis:
return rv
rv = self.ephemeral.get(cache_key, Ellipsis)
if rv is not Ellipsis:
return rv
return Ellipsis
def remember_as_missing(self, path, alt=PRIMARY_ALT, virtual_path=None):
cache_key = self._get_cache_key(path, alt, virtual_path)
self.persistent.pop(cache_key, None)
self.ephemeral[cache_key] = None
|
the-stack_106_31611 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for linuxbridge rpc
"""
import stubout
import unittest2
from quantum.agent import rpc as agent_rpc
from quantum.common import topics
from quantum.openstack.common import context
from quantum.openstack.common import rpc
from quantum.plugins.linuxbridge import lb_quantum_plugin as plb
class rpcApiTestCase(unittest2.TestCase):
def _test_lb_api(self, rpcapi, topic, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
expected_retval = 'foo' if method == 'call' else None
expected_msg = rpcapi.make_msg(method, **kwargs)
expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
if rpc_method == 'cast' and method == 'run_instance':
kwargs['call'] = False
self.fake_args = None
self.fake_kwargs = None
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(rpc, rpc_method, _fake_rpc_method)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
expected_args = [ctxt, topic, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
def test_delete_network(self):
rpcapi = plb.AgentNotifierApi(topics.AGENT)
self._test_lb_api(rpcapi,
topics.get_topic_name(topics.AGENT,
topics.NETWORK,
topics.DELETE),
'network_delete', rpc_method='fanout_cast',
network_id='fake_request_spec')
def test_port_update(self):
rpcapi = plb.AgentNotifierApi(topics.AGENT)
self._test_lb_api(rpcapi,
topics.get_topic_name(topics.AGENT,
topics.PORT,
topics.UPDATE),
'port_update', rpc_method='fanout_cast',
port='fake_port',
physical_network='fake_net',
vlan_id='fake_vlan_id')
def test_device_details(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_lb_api(rpcapi, topics.PLUGIN,
'get_device_details', rpc_method='call',
device='fake_device',
agent_id='fake_agent_id')
def test_update_device_down(self):
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
self._test_lb_api(rpcapi, topics.PLUGIN,
'update_device_down', rpc_method='call',
device='fake_device',
agent_id='fake_agent_id')
|
the-stack_106_31612 | import math
from datetime import datetime, timedelta
from typing import List, Optional, Tuple
from fastapi.encoders import jsonable_encoder
from dispatch.config import ANNUAL_COST_EMPLOYEE, BUSINESS_HOURS_YEAR
from dispatch.database import SessionLocal
from dispatch.incident_priority import service as incident_priority_service
from dispatch.incident_priority.models import IncidentPriorityType
from dispatch.incident_type import service as incident_type_service
from dispatch.participant import flows as participant_flows
from dispatch.participant_role import service as participant_role_service
from dispatch.participant_role.models import ParticipantRoleType
from dispatch.plugins.base import plugins
from .enums import IncidentStatus
from .models import Incident, IncidentUpdate
HOURS_IN_DAY = 24
SECONDS_IN_HOUR = 3600
def resolve_incident_commander_email(
db_session: SessionLocal,
reporter_email: str,
incident_type: str,
incident_priority: str,
incident_name: str,
incident_title: str,
incident_description: str,
):
"""Resolves the correct incident commander email based on given parameters."""
if incident_priority == IncidentPriorityType.info:
return reporter_email
commander_service = incident_type_service.get_by_name(
db_session=db_session, name=incident_type
).commander_service
p = plugins.get(commander_service.type)
# page for high priority incidents
# we could do this at the end but it seems pretty important...
if incident_priority == IncidentPriorityType.high:
p.page(
service_id=commander_service.external_id,
incident_name=incident_name,
incident_title=incident_title,
incident_description=incident_description,
)
return p.get(service_id=commander_service.external_id)
def get(*, db_session, incident_id: str) -> Optional[Incident]:
"""Returns an incident based on the given id."""
return db_session.query(Incident).filter(Incident.id == incident_id).first()
def get_by_name(*, db_session, incident_name: str) -> Optional[Incident]:
"""Returns an incident based on the given name."""
return db_session.query(Incident).filter(Incident.name == incident_name).first()
def get_all(*, db_session) -> List[Optional[Incident]]:
"""Returns all incidents."""
return db_session.query(Incident)
def get_all_by_status(
*, db_session, status: IncidentStatus, skip=0, limit=100
) -> List[Optional[Incident]]:
"""Returns all incidents based on the given status."""
return (
db_session.query(Incident).filter(Incident.status == status).offset(skip).limit(limit).all()
)
def get_all_last_x_hours_by_status(
*, db_session, status: IncidentStatus, hours: int, skip=0, limit=100
) -> List[Optional[Incident]]:
"""Returns all incidents of a given status in the last x hours."""
now = datetime.utcnow()
if status == IncidentStatus.active:
return (
db_session.query(Incident)
.filter(Incident.status == IncidentStatus.active)
.filter(Incident.created_at >= now - timedelta(hours=hours))
.offset(skip)
.limit(limit)
.all()
)
if status == IncidentStatus.stable:
return (
db_session.query(Incident)
.filter(Incident.status == IncidentStatus.stable)
.filter(Incident.stable_at >= now - timedelta(hours=hours))
.offset(skip)
.limit(limit)
.all()
)
if status == IncidentStatus.closed:
return (
db_session.query(Incident)
.filter(Incident.status == IncidentStatus.closed)
.filter(Incident.closed_at >= now - timedelta(hours=hours))
.offset(skip)
.limit(limit)
.all()
)
def create(
*,
db_session,
incident_priority: str,
incident_type: str,
reporter_email: str,
title: str,
status: str,
description: str,
) -> Incident:
"""Creates a new incident."""
# We get the incident type by name
incident_type = incident_type_service.get_by_name(
db_session=db_session, name=incident_type["name"]
)
# We get the incident priority by name
incident_priority = incident_priority_service.get_by_name(
db_session=db_session, name=incident_priority["name"]
)
# We create the incident
incident = Incident(
title=title,
description=description,
status=status,
incident_type=incident_type,
incident_priority=incident_priority,
)
db_session.add(incident)
db_session.commit()
# We add the reporter to the incident
reporter_participant = participant_flows.add_participant(
reporter_email, incident.id, db_session, ParticipantRoleType.reporter
)
# We resolve the incident commander email
incident_commander_email = resolve_incident_commander_email(
db_session,
reporter_email,
incident_type.name,
incident_priority.name,
"",
title,
description,
)
if reporter_email == incident_commander_email:
        # We add the role of incident commander to the reporter
participant_role_service.add_role(
participant_id=reporter_participant.id,
participant_role=ParticipantRoleType.incident_commander,
db_session=db_session,
)
else:
# We create a new participant for the incident commander and we add it to the incident
participant_flows.add_participant(
incident_commander_email,
incident.id,
db_session,
ParticipantRoleType.incident_commander,
)
return incident
def update(*, db_session, incident: Incident, incident_in: IncidentUpdate) -> Incident:
incident_data = jsonable_encoder(incident)
incident_priority = incident_priority_service.get_by_name(
db_session=db_session, name=incident_in.incident_priority.name
)
incident_type = incident_type_service.get_by_name(
db_session=db_session, name=incident_in.incident_type.name
)
update_data = incident_in.dict(
skip_defaults=True, exclude={"incident_type", "incident_priority", "commander"}
)
for field in incident_data:
if field in update_data:
setattr(incident, field, update_data[field])
incident.incident_priority = incident_priority
incident.incident_type = incident_type
db_session.add(incident)
db_session.commit()
return incident
def delete(*, db_session, incident_id: int):
# TODO: When deleting, respect referential integrity here in the code. Or add cascading deletes
# in models.py.
db_session.query(Incident).filter(Incident.id == incident_id).delete()
db_session.commit()
def calculate_cost(incident_id: int, db_session: SessionLocal, incident_review=False):
"""Calculates the incident cost."""
    # we get the incident
incident = get(db_session=db_session, incident_id=incident_id)
participants_active_hours = 0
for participant in incident.participants:
participant_active_at = participant.active_at
participant_inactive_at = (
participant.inactive_at if participant.inactive_at else datetime.utcnow()
)
participant_active_time = participant_inactive_at - participant_active_at
participant_active_hours = participant_active_time.total_seconds() / SECONDS_IN_HOUR
# we assume that participants only spend ~10 hours/day working on the incident if the incident goes past 24hrs
if participant_active_hours > HOURS_IN_DAY:
days, hours = divmod(participant_active_hours, HOURS_IN_DAY)
participant_active_hours = math.ceil((days * HOURS_IN_DAY * 0.4) + hours)
participants_active_hours += participant_active_hours
num_participants = len(incident.participants)
# we calculate the number of hours spent responding per person using the 25/50/25 rule,
# where 25% of participants get a full share, 50% get a half share, and 25% get a quarter share
response_hours_full_share = num_participants * 0.25 * participants_active_hours
response_hours_half_share = num_participants * 0.5 * participants_active_hours * 0.5
response_hours_quarter_share = num_participants * 0.25 * participants_active_hours * 0.25
response_hours = (
response_hours_full_share + response_hours_half_share + response_hours_quarter_share
)
# we calculate the number of hours spent in incident review related activities
incident_review_hours = 0
if incident_review:
incident_review_prep = 1
incident_review_meeting = num_participants * 0.5 * 1
incident_review_hours = incident_review_prep + incident_review_meeting
# we calculate and round up the hourly rate
hourly_rate = math.ceil(ANNUAL_COST_EMPLOYEE / BUSINESS_HOURS_YEAR)
# we calculate, round up, and format the incident cost
incident_cost = f"{math.ceil((response_hours + incident_review_hours) * hourly_rate):.2f}"
return incident_cost
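# Rough worked example of the model above (hypothetical numbers): with 4
# participants active for 10 hours each, participants_active_hours = 40 and
# response_hours = 4*0.25*40 + 4*0.5*40*0.5 + 4*0.25*40*0.25 = 90; that figure
# (plus any incident review hours) is multiplied by the rounded-up hourly rate
# and formatted to two decimals as the returned incident cost string.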
|
the-stack_106_31613 | # Copyright The IETF Trust 2012-2019, All Rights Reserved
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import io
import os
from django import forms
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponseRedirect, Http404
from django.urls import reverse
from django.template.loader import render_to_string
from django.conf import settings
import debug # pyflakes:ignore
from ietf.doc.models import ( BallotDocEvent, BallotPositionDocEvent, DocAlias, DocEvent,
Document, NewRevisionDocEvent, State )
from ietf.doc.utils import ( add_state_change_event, close_open_ballots,
create_ballot_if_not_open, update_telechat )
from ietf.doc.mails import email_iana
from ietf.doc.forms import AdForm
from ietf.group.models import Role, Group
from ietf.iesg.models import TelechatDate
from ietf.ietfauth.utils import has_role, role_required, is_authorized_in_doc_stream
from ietf.person.models import Person
from ietf.utils import log
from ietf.utils.mail import send_mail_preformatted
from ietf.utils.textupload import get_cleaned_text_file_content
from ietf.mailtrigger.utils import gather_address_lists
class ChangeStateForm(forms.Form):
review_state = forms.ModelChoiceField(State.objects.filter(used=True, type="conflrev"), label="Conflict review state", empty_label=None, required=True)
comment = forms.CharField(widget=forms.Textarea, help_text="Optional comment for the review history.", required=False, strip=False)
@role_required("Area Director", "Secretariat")
def change_state(request, name, option=None):
"""Change state of an IESG review for IETF conflicts in other stream's documents, notifying parties as necessary
and logging the change as a comment."""
review = get_object_or_404(Document, type="conflrev", name=name)
login = request.user.person
if request.method == 'POST':
form = ChangeStateForm(request.POST)
if form.is_valid():
clean = form.cleaned_data
new_state = clean['review_state']
comment = clean['comment'].rstrip()
if comment:
c = DocEvent(type="added_comment", doc=review, rev=review.rev, by=login)
c.desc = comment
c.save()
prev_state = review.get_state()
if new_state != prev_state:
events = []
review.set_state(new_state)
events.append(add_state_change_event(review, login, prev_state, new_state))
review.save_with_history(events)
if new_state.slug == "iesgeval":
e = create_ballot_if_not_open(request, review, login, "conflrev") # pyflakes:ignore
ballot = review.latest_event(BallotDocEvent, type="created_ballot")
log.assertion('ballot == e')
if has_role(request.user, "Area Director") and not review.latest_event(BallotPositionDocEvent, ad=login, ballot=ballot, type="changed_ballot_position"):
# The AD putting a conflict review into iesgeval who doesn't already have a position is saying "yes"
pos = BallotPositionDocEvent(doc=review, rev=review.rev, by=login)
pos.ballot = ballot
pos.type = "changed_ballot_position"
pos.ad = login
pos.pos_id = "yes"
pos.desc = "[Ballot Position Update] New position, %s, has been recorded for %s" % (pos.pos.name, pos.ad.plain_name())
pos.save()
# Consider mailing that position to 'ballot_saved'
send_conflict_eval_email(request,review)
return redirect('ietf.doc.views_doc.document_main', name=review.name)
else:
s = review.get_state()
init = dict(review_state=s.pk if s else None)
form = ChangeStateForm(initial=init)
return render(request, 'doc/change_state.html',
dict(form=form,
doc=review,
login=login,
help_url=reverse('ietf.doc.views_help.state_help', kwargs=dict(type="conflict-review")),
))
def send_conflict_review_ad_changed_email(request, review, event):
addrs = gather_address_lists('conflrev_ad_changed', doc=review).as_strings(compact=False)
msg = render_to_string("doc/conflict_review/changed_ad.txt",
dict(frm = settings.DEFAULT_FROM_EMAIL,
to = addrs.to,
cc = addrs.cc,
by = request.user.person,
event = event,
review = review,
reviewed_doc = review.relateddocument_set.get(relationship__slug='conflrev').target.document,
review_url = settings.IDTRACKER_BASE_URL+review.get_absolute_url(),
)
)
send_mail_preformatted(request,msg)
def send_conflict_review_started_email(request, review):
addrs = gather_address_lists('conflrev_requested',doc=review).as_strings(compact=False)
msg = render_to_string("doc/conflict_review/review_started.txt",
dict(frm = settings.DEFAULT_FROM_EMAIL,
to = addrs.to,
cc = addrs.cc,
by = request.user.person,
review = review,
reviewed_doc = review.relateddocument_set.get(relationship__slug='conflrev').target.document,
review_url = settings.IDTRACKER_BASE_URL+review.get_absolute_url(),
)
)
if not has_role(request.user,"Secretariat"):
send_mail_preformatted(request,msg)
addrs = gather_address_lists('conflrev_requested_iana',doc=review).as_strings(compact=False)
email_iana(request,
review.relateddocument_set.get(relationship__slug='conflrev').target.document,
addrs.to,
msg,
cc=addrs.cc)
def send_conflict_eval_email(request,review):
msg = render_to_string("doc/eval_email.txt",
dict(doc=review,
doc_url = settings.IDTRACKER_BASE_URL+review.get_absolute_url(),
)
)
addrs = gather_address_lists('ballot_issued',doc=review).as_strings()
override = {'To':addrs.to}
if addrs.cc:
override['Cc']=addrs.cc
send_mail_preformatted(request,msg,override=override)
addrs = gather_address_lists('ballot_issued_iana',doc=review).as_strings()
email_iana(request,
review.relateddocument_set.get(relationship__slug='conflrev').target.document,
addrs.to,
msg,
addrs.cc)
class UploadForm(forms.Form):
content = forms.CharField(widget=forms.Textarea, label="Conflict review response", help_text="Edit the conflict review response.", required=False, strip=False)
txt = forms.FileField(label=".txt format", help_text="Or upload a .txt file.", required=False)
def clean_content(self):
return self.cleaned_data["content"].replace("\r", "")
def clean_txt(self):
return get_cleaned_text_file_content(self.cleaned_data["txt"])
def save(self, review):
filename = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.canonical_name(), review.rev))
with io.open(filename, 'w', encoding='utf-8') as destination:
if self.cleaned_data['txt']:
destination.write(self.cleaned_data['txt'])
else:
destination.write(self.cleaned_data['content'])
#This is very close to submit on charter - can we get better reuse?
@role_required('Area Director','Secretariat')
def submit(request, name):
review = get_object_or_404(Document, type="conflrev", name=name)
login = request.user.person
path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.canonical_name(), review.rev))
not_uploaded_yet = review.rev == "00" and not os.path.exists(path)
if not_uploaded_yet:
# this case is special - the conflict review text document doesn't actually exist yet
next_rev = review.rev
else:
next_rev = "%02d" % (int(review.rev)+1)
if request.method == 'POST':
if "submit_response" in request.POST:
form = UploadForm(request.POST, request.FILES)
if form.is_valid():
review.rev = next_rev
events = []
e = NewRevisionDocEvent(doc=review, by=login, type="new_revision")
e.desc = "New version available: <b>%s-%s.txt</b>" % (review.canonical_name(), review.rev)
e.rev = review.rev
e.save()
events.append(e)
# Save file on disk
form.save(review)
review.save_with_history(events)
return redirect('ietf.doc.views_doc.document_main', name=review.name)
elif "reset_text" in request.POST:
init = { "content": render_to_string("doc/conflict_review/review_choices.txt",dict())}
form = UploadForm(initial=init)
        # Protect against handcrafted malicious posts
else:
form = None
else:
form = None
if not form:
init = { "content": ""}
if not_uploaded_yet:
init["content"] = render_to_string("doc/conflict_review/review_choices.txt",
dict(),
)
else:
filename = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.canonical_name(), review.rev))
try:
with io.open(filename, 'r') as f:
init["content"] = f.read()
except IOError:
pass
form = UploadForm(initial=init)
return render(request, 'doc/conflict_review/submit.html',
{'form': form,
'next_rev': next_rev,
'review' : review,
'conflictdoc' : review.relateddocument_set.get(relationship__slug='conflrev').target.document,
})
@role_required("Area Director", "Secretariat")
def edit_ad(request, name):
"""Change the shepherding Area Director for this review."""
review = get_object_or_404(Document, type="conflrev", name=name)
if request.method == 'POST':
form = AdForm(request.POST)
if form.is_valid():
review.ad = form.cleaned_data['ad']
c = DocEvent(type="added_comment", doc=review, rev=review.rev, by=request.user.person)
c.desc = "Shepherding AD changed to "+review.ad.name
c.save()
review.save_with_history([c])
send_conflict_review_ad_changed_email(request, review, c)
return redirect('ietf.doc.views_doc.document_main', name=review.name)
else:
init = { "ad" : review.ad_id }
form = AdForm(initial=init)
conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target.document
titletext = 'the conflict review of %s-%s' % (conflictdoc.canonical_name(),conflictdoc.rev)
return render(request, 'doc/change_ad.html',
{'form': form,
'doc': review,
'titletext': titletext
},
)
def default_approval_text(review):
current_text = review.text_or_error() # pyflakes:ignore
conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target.document
if conflictdoc.stream_id=='ise':
receiver = 'Independent Submissions Editor'
elif conflictdoc.stream_id=='irtf':
receiver = 'IRTF'
else:
receiver = 'recipient'
addrs = gather_address_lists('ballot_approved_conflrev',doc=review).as_strings(compact=False)
text = render_to_string("doc/conflict_review/approval_text.txt",
dict(review=review,
review_url = settings.IDTRACKER_BASE_URL+review.get_absolute_url(),
conflictdoc = conflictdoc,
conflictdoc_url = settings.IDTRACKER_BASE_URL+conflictdoc.get_absolute_url(),
receiver=receiver,
approved_review = current_text,
to = addrs.to,
cc = addrs.cc,
)
)
return text
class AnnouncementForm(forms.Form):
announcement_text = forms.CharField(widget=forms.Textarea, label="IETF Conflict Review Announcement", help_text="Edit the announcement message.", required=True, strip=False)
@role_required("Secretariat")
def approve_conflict_review(request, name):
"""Approve this conflict review, setting the appropriate state and send the announcement to the right parties."""
review = get_object_or_404(Document, type="conflrev", name=name)
if review.get_state('conflrev').slug not in ('appr-reqnopub-pend','appr-noprob-pend'):
raise Http404
login = request.user.person
if request.method == 'POST':
form = AnnouncementForm(request.POST)
if form.is_valid():
prev_state = review.get_state()
events = []
new_state_slug = 'appr-reqnopub-sent' if prev_state.slug == 'appr-reqnopub-pend' else 'appr-noprob-sent'
new_state = State.objects.get(used=True, type="conflrev", slug=new_state_slug)
review.set_state(new_state)
e = add_state_change_event(review, login, prev_state, new_state)
events.append(e)
close_open_ballots(review, login)
e = DocEvent(doc=review, rev=review.rev, by=login)
e.type = "iesg_approved"
e.desc = "IESG has approved the conflict review response"
e.save()
events.append(e)
review.save_with_history(events)
# send announcement
send_mail_preformatted(request, form.cleaned_data['announcement_text'])
c = DocEvent(type="added_comment", doc=review, rev=review.rev, by=login)
c.desc = "The following approval message was sent\n"+form.cleaned_data['announcement_text']
c.save()
return HttpResponseRedirect(review.get_absolute_url())
else:
init = { "announcement_text" : default_approval_text(review) }
form = AnnouncementForm(initial=init)
return render(request, 'doc/conflict_review/approve.html',
dict(
review = review,
conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target.document,
form = form,
))
class SimpleStartReviewForm(forms.Form):
notify = forms.CharField(max_length=255, label="Notice emails", help_text="Separate email addresses with commas.", required=False)
class StartReviewForm(forms.Form):
ad = forms.ModelChoiceField(Person.objects.filter(role__name="ad", role__group__state="active",role__group__type='area').order_by('name'),
label="Shepherding AD", empty_label="(None)", required=True)
create_in_state = forms.ModelChoiceField(State.objects.filter(used=True, type="conflrev", slug__in=("needshep", "adrev")), empty_label=None, required=False)
notify = forms.CharField(max_length=255, label="Notice emails", help_text="Separate email addresses with commas.", required=False)
telechat_date = forms.TypedChoiceField(coerce=lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date(), empty_value=None, required=False, widget=forms.Select(attrs={'onchange':'make_bold()'}))
def __init__(self, *args, **kwargs):
        super(StartReviewForm, self).__init__(*args, **kwargs)
# telechat choices
dates = [d.date for d in TelechatDate.objects.active().order_by('date')]
#init = kwargs['initial']['telechat_date']
#if init and init not in dates:
# dates.insert(0, init)
self.fields['telechat_date'].choices = [("", "(not on agenda)")] + [(d, d.strftime("%Y-%m-%d")) for d in dates]
@role_required("Secretariat","IRTF Chair","ISE")
def start_review(request, name):
if has_role(request.user,"Secretariat"):
return start_review_as_secretariat(request,name)
else:
return start_review_as_stream_owner(request,name)
def start_review_sanity_check(request, name):
doc_to_review = get_object_or_404(Document, type="draft", name=name)
if ( not doc_to_review.stream_id in ('ise','irtf') ) or ( not is_authorized_in_doc_stream(request.user,doc_to_review)):
raise Http404
# sanity check that there's not already a conflict review document for this document
if [ rel.source for alias in doc_to_review.docalias.all() for rel in alias.relateddocument_set.filter(relationship='conflrev') ]:
raise Http404
return doc_to_review
def build_notify_addresses(doc_to_review):
# Take care to do the right thing during ietf chair and stream owner transitions
notify_addresses = []
notify_addresses.extend([r.formatted_email() for r in Role.objects.filter(group__acronym=doc_to_review.stream.slug, name='chair')])
notify_addresses.append("%s@%s" % (doc_to_review.name, settings.DRAFT_ALIAS_DOMAIN))
return notify_addresses
def build_conflict_review_document(login, doc_to_review, ad, notify, create_in_state):
if doc_to_review.name.startswith('draft-'):
review_name = 'conflict-review-'+doc_to_review.name[6:]
else:
# This is a failsafe - and might be treated better as an error
review_name = 'conflict-review-'+doc_to_review.name
iesg_group = Group.objects.get(acronym='iesg')
conflict_review = Document.objects.create(
type_id="conflrev",
title="IETF conflict review for %s" % doc_to_review.name,
name=review_name,
rev="00",
ad=ad,
notify=notify,
stream_id='ietf',
group=iesg_group,
)
conflict_review.set_state(create_in_state)
DocAlias.objects.create( name=review_name).docs.add( conflict_review )
conflict_review.relateddocument_set.create(target=DocAlias.objects.get(name=doc_to_review.name),relationship_id='conflrev')
c = DocEvent(type="added_comment", doc=conflict_review, rev=conflict_review.rev, by=login)
c.desc = "IETF conflict review requested"
c.save()
c = DocEvent(type="added_comment", doc=doc_to_review, rev=doc_to_review.rev, by=login)
# Is it really OK to put html tags into comment text?
c.desc = 'IETF conflict review initiated - see <a href="%s">%s</a>' % (reverse('ietf.doc.views_doc.document_main', kwargs={'name':conflict_review.name}),conflict_review.name)
c.save()
return conflict_review
def start_review_as_secretariat(request, name):
"""Start the conflict review process, setting the initial shepherding AD, and possibly putting the review on a telechat."""
doc_to_review = start_review_sanity_check(request, name)
login = request.user.person
if request.method == 'POST':
form = StartReviewForm(request.POST)
if form.is_valid():
conflict_review = build_conflict_review_document(login = login,
doc_to_review = doc_to_review,
ad = form.cleaned_data['ad'],
notify = form.cleaned_data['notify'],
create_in_state = form.cleaned_data['create_in_state']
)
tc_date = form.cleaned_data['telechat_date']
if tc_date:
update_telechat(request, conflict_review, login, tc_date)
send_conflict_review_started_email(request, conflict_review)
return HttpResponseRedirect(conflict_review.get_absolute_url())
else:
notify_addresses = build_notify_addresses(doc_to_review)
init = {
"ad" : Role.objects.filter(group__acronym='ietf',name='chair')[0].person.id,
"notify" : ', '.join(notify_addresses),
}
form = StartReviewForm(initial=init)
return render(request, 'doc/conflict_review/start.html',
{'form': form,
'doc_to_review': doc_to_review,
},
)
def start_review_as_stream_owner(request, name):
"""Start the conflict review process using defaults for everything but notify and let the secretariat know"""
doc_to_review = start_review_sanity_check(request, name)
login = request.user.person
if request.method == 'POST':
form = SimpleStartReviewForm(request.POST)
if form.is_valid():
conflict_review = build_conflict_review_document(login = login,
doc_to_review = doc_to_review,
ad = Role.objects.filter(group__acronym='ietf',name='chair')[0].person,
notify = form.cleaned_data['notify'],
create_in_state = State.objects.get(used=True,type='conflrev',slug='needshep')
)
send_conflict_review_started_email(request, conflict_review)
return HttpResponseRedirect(conflict_review.get_absolute_url())
else:
notify_addresses = build_notify_addresses(doc_to_review)
init = {
"notify" : ', '.join(notify_addresses),
}
form = SimpleStartReviewForm(initial=init)
return render(request, 'doc/conflict_review/start.html',
{'form': form,
'doc_to_review': doc_to_review,
},
)
|
the-stack_106_31614 | import operator
import functools
with open("number.txt", "r") as fd:
    number = fd.read().strip()
window = 13
products = []
# slide a 13-digit window across the number and take the product of each window's digits
for i in range(len(number) - window + 1):
    digits = [int(d) for d in number[i:i + window]]
    products.append(functools.reduce(operator.mul, digits, 1))
print(max(products))
|
the-stack_106_31616 | #!/usr/bin/env python
#
# Electrum - lightweight Futurocoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# list of words from http://en.wiktionary.org/wiki/Wiktionary:Frequency_lists/Contemporary_poetry
words = [
"like",
"just",
"love",
"know",
"never",
"want",
"time",
"out",
"there",
"make",
"look",
"eye",
"down",
"only",
"think",
"heart",
"back",
"then",
"into",
"about",
"more",
"away",
"still",
"them",
"take",
"thing",
"even",
"through",
"long",
"always",
"world",
"too",
"friend",
"tell",
"try",
"hand",
"thought",
"over",
"here",
"other",
"need",
"smile",
"again",
"much",
"cry",
"been",
"night",
"ever",
"little",
"said",
"end",
"some",
"those",
"around",
"mind",
"people",
"girl",
"leave",
"dream",
"left",
"turn",
"myself",
"give",
"nothing",
"really",
"off",
"before",
"something",
"find",
"walk",
"wish",
"good",
"once",
"place",
"ask",
"stop",
"keep",
"watch",
"seem",
"everything",
"wait",
"got",
"yet",
"made",
"remember",
"start",
"alone",
"run",
"hope",
"maybe",
"believe",
"body",
"hate",
"after",
"close",
"talk",
"stand",
"own",
"each",
"hurt",
"help",
"home",
"god",
"soul",
"new",
"many",
"two",
"inside",
"should",
"true",
"first",
"fear",
"mean",
"better",
"play",
"another",
"gone",
"change",
"use",
"wonder",
"someone",
"hair",
"cold",
"open",
"best",
"any",
"behind",
"happen",
"water",
"dark",
"laugh",
"stay",
"forever",
"name",
"work",
"show",
"sky",
"break",
"came",
"deep",
"door",
"put",
"black",
"together",
"upon",
"happy",
"such",
"great",
"white",
"matter",
"fill",
"past",
"please",
"burn",
"cause",
"enough",
"touch",
"moment",
"soon",
"voice",
"scream",
"anything",
"stare",
"sound",
"red",
"everyone",
"hide",
"kiss",
"truth",
"death",
"beautiful",
"mine",
"blood",
"broken",
"very",
"pass",
"next",
"forget",
"tree",
"wrong",
"air",
"mother",
"understand",
"lip",
"hit",
"wall",
"memory",
"sleep",
"free",
"high",
"realize",
"school",
"might",
"skin",
"sweet",
"perfect",
"blue",
"kill",
"breath",
"dance",
"against",
"fly",
"between",
"grow",
"strong",
"under",
"listen",
"bring",
"sometimes",
"speak",
"pull",
"person",
"become",
"family",
"begin",
"ground",
"real",
"small",
"father",
"sure",
"feet",
"rest",
"young",
"finally",
"land",
"across",
"today",
"different",
"guy",
"line",
"fire",
"reason",
"reach",
"second",
"slowly",
"write",
"eat",
"smell",
"mouth",
"step",
"learn",
"three",
"floor",
"promise",
"breathe",
"darkness",
"push",
"earth",
"guess",
"save",
"song",
"above",
"along",
"both",
"color",
"house",
"almost",
"sorry",
"anymore",
"brother",
"okay",
"dear",
"game",
"fade",
"already",
"apart",
"warm",
"beauty",
"heard",
"notice",
"question",
"shine",
"began",
"piece",
"whole",
"shadow",
"secret",
"street",
"within",
"finger",
"point",
"morning",
"whisper",
"child",
"moon",
"green",
"story",
"glass",
"kid",
"silence",
"since",
"soft",
"yourself",
"empty",
"shall",
"angel",
"answer",
"baby",
"bright",
"dad",
"path",
"worry",
"hour",
"drop",
"follow",
"power",
"war",
"half",
"flow",
"heaven",
"act",
"chance",
"fact",
"least",
"tired",
"children",
"near",
"quite",
"afraid",
"rise",
"sea",
"taste",
"window",
"cover",
"nice",
"trust",
"lot",
"sad",
"cool",
"force",
"peace",
"return",
"blind",
"easy",
"ready",
"roll",
"rose",
"drive",
"held",
"music",
"beneath",
"hang",
"mom",
"paint",
"emotion",
"quiet",
"clear",
"cloud",
"few",
"pretty",
"bird",
"outside",
"paper",
"picture",
"front",
"rock",
"simple",
"anyone",
"meant",
"reality",
"road",
"sense",
"waste",
"bit",
"leaf",
"thank",
"happiness",
"meet",
"men",
"smoke",
"truly",
"decide",
"self",
"age",
"book",
"form",
"alive",
"carry",
"escape",
"damn",
"instead",
"able",
"ice",
"minute",
"throw",
"catch",
"leg",
"ring",
"course",
"goodbye",
"lead",
"poem",
"sick",
"corner",
"desire",
"known",
"problem",
"remind",
"shoulder",
"suppose",
"toward",
"wave",
"drink",
"jump",
"woman",
"pretend",
"sister",
"week",
"human",
"joy",
"crack",
"grey",
"pray",
"surprise",
"dry",
"knee",
"less",
"search",
"bleed",
"caught",
"clean",
"embrace",
"future",
"king",
"son",
"sorrow",
"chest",
"hug",
"remain",
"sat",
"worth",
"blow",
"daddy",
"final",
"parent",
"tight",
"also",
"create",
"lonely",
"safe",
"cross",
"dress",
"evil",
"silent",
"bone",
"fate",
"perhaps",
"anger",
"class",
"scar",
"snow",
"tiny",
"tonight",
"continue",
"control",
"dog",
"edge",
"mirror",
"month",
"suddenly",
"comfort",
"given",
"loud",
"quickly",
"gaze",
"plan",
"rush",
"stone",
"town",
"battle",
"ignore",
"spirit",
"stood",
"stupid",
"yours",
"brown",
"build",
"dust",
"hey",
"kept",
"pay",
"phone",
"twist",
"although",
"ball",
"beyond",
"hidden",
"nose",
"taken",
"fail",
"float",
"pure",
"somehow",
"wash",
"wrap",
"angry",
"cheek",
"creature",
"forgotten",
"heat",
"rip",
"single",
"space",
"special",
"weak",
"whatever",
"yell",
"anyway",
"blame",
"job",
"choose",
"country",
"curse",
"drift",
"echo",
"figure",
"grew",
"laughter",
"neck",
"suffer",
"worse",
"yeah",
"disappear",
"foot",
"forward",
"knife",
"mess",
"somewhere",
"stomach",
"storm",
"beg",
"idea",
"lift",
"offer",
"breeze",
"field",
"five",
"often",
"simply",
"stuck",
"win",
"allow",
"confuse",
"enjoy",
"except",
"flower",
"seek",
"strength",
"calm",
"grin",
"gun",
"heavy",
"hill",
"large",
"ocean",
"shoe",
"sigh",
"straight",
"summer",
"tongue",
"accept",
"crazy",
"everyday",
"exist",
"grass",
"mistake",
"sent",
"shut",
"surround",
"table",
"ache",
"brain",
"destroy",
"heal",
"nature",
"shout",
"sign",
"stain",
"choice",
"doubt",
"glance",
"glow",
"mountain",
"queen",
"stranger",
"throat",
"tomorrow",
"city",
"either",
"fish",
"flame",
"rather",
"shape",
"spin",
"spread",
"ash",
"distance",
"finish",
"image",
"imagine",
"important",
"nobody",
"shatter",
"warmth",
"became",
"feed",
"flesh",
"funny",
"lust",
"shirt",
"trouble",
"yellow",
"attention",
"bare",
"bite",
"money",
"protect",
"amaze",
"appear",
"born",
"choke",
"completely",
"daughter",
"fresh",
"friendship",
"gentle",
"probably",
"six",
"deserve",
"expect",
"grab",
"middle",
"nightmare",
"river",
"thousand",
"weight",
"worst",
"wound",
"barely",
"bottle",
"cream",
"regret",
"relationship",
"stick",
"test",
"crush",
"endless",
"fault",
"itself",
"rule",
"spill",
"art",
"circle",
"join",
"kick",
"mask",
"master",
"passion",
"quick",
"raise",
"smooth",
"unless",
"wander",
"actually",
"broke",
"chair",
"deal",
"favorite",
"gift",
"note",
"number",
"sweat",
"box",
"chill",
"clothes",
"lady",
"mark",
"park",
"poor",
"sadness",
"tie",
"animal",
"belong",
"brush",
"consume",
"dawn",
"forest",
"innocent",
"pen",
"pride",
"stream",
"thick",
"clay",
"complete",
"count",
"draw",
"faith",
"press",
"silver",
"struggle",
"surface",
"taught",
"teach",
"wet",
"bless",
"chase",
"climb",
"enter",
"letter",
"melt",
"metal",
"movie",
"stretch",
"swing",
"vision",
"wife",
"beside",
"crash",
"forgot",
"guide",
"haunt",
"joke",
"knock",
"plant",
"pour",
"prove",
"reveal",
"steal",
"stuff",
"trip",
"wood",
"wrist",
"bother",
"bottom",
"crawl",
"crowd",
"fix",
"forgive",
"frown",
"grace",
"loose",
"lucky",
"party",
"release",
"surely",
"survive",
"teacher",
"gently",
"grip",
"speed",
"suicide",
"travel",
"treat",
"vein",
"written",
"cage",
"chain",
"conversation",
"date",
"enemy",
"however",
"interest",
"million",
"page",
"pink",
"proud",
"sway",
"themselves",
"winter",
"church",
"cruel",
"cup",
"demon",
"experience",
"freedom",
"pair",
"pop",
"purpose",
"respect",
"shoot",
"softly",
"state",
"strange",
"bar",
"birth",
"curl",
"dirt",
"excuse",
"lord",
"lovely",
"monster",
"order",
"pack",
"pants",
"pool",
"scene",
"seven",
"shame",
"slide",
"ugly",
"among",
"blade",
"blonde",
"closet",
"creek",
"deny",
"drug",
"eternity",
"gain",
"grade",
"handle",
"key",
"linger",
"pale",
"prepare",
"swallow",
"swim",
"tremble",
"wheel",
"won",
"cast",
"cigarette",
"claim",
"college",
"direction",
"dirty",
"gather",
"ghost",
"hundred",
"loss",
"lung",
"orange",
"present",
"swear",
"swirl",
"twice",
"wild",
"bitter",
"blanket",
"doctor",
"everywhere",
"flash",
"grown",
"knowledge",
"numb",
"pressure",
"radio",
"repeat",
"ruin",
"spend",
"unknown",
"buy",
"clock",
"devil",
"early",
"false",
"fantasy",
"pound",
"precious",
"refuse",
"sheet",
"teeth",
"welcome",
"add",
"ahead",
"block",
"bury",
"caress",
"content",
"depth",
"despite",
"distant",
"marry",
"purple",
"threw",
"whenever",
"bomb",
"dull",
"easily",
"grasp",
"hospital",
"innocence",
"normal",
"receive",
"reply",
"rhyme",
"shade",
"someday",
"sword",
"toe",
"visit",
"asleep",
"bought",
"center",
"consider",
"flat",
"hero",
"history",
"ink",
"insane",
"muscle",
"mystery",
"pocket",
"reflection",
"shove",
"silently",
"smart",
"soldier",
"spot",
"stress",
"train",
"type",
"view",
"whether",
"bus",
"energy",
"explain",
"holy",
"hunger",
"inch",
"magic",
"mix",
"noise",
"nowhere",
"prayer",
"presence",
"shock",
"snap",
"spider",
"study",
"thunder",
"trail",
"admit",
"agree",
"bag",
"bang",
"bound",
"butterfly",
"cute",
"exactly",
"explode",
"familiar",
"fold",
"further",
"pierce",
"reflect",
"scent",
"selfish",
"sharp",
"sink",
"spring",
"stumble",
"universe",
"weep",
"women",
"wonderful",
"action",
"ancient",
"attempt",
"avoid",
"birthday",
"branch",
"chocolate",
"core",
"depress",
"drunk",
"especially",
"focus",
"fruit",
"honest",
"match",
"palm",
"perfectly",
"pillow",
"pity",
"poison",
"roar",
"shift",
"slightly",
"thump",
"truck",
"tune",
"twenty",
"unable",
"wipe",
"wrote",
"coat",
"constant",
"dinner",
"drove",
"egg",
"eternal",
"flight",
"flood",
"frame",
"freak",
"gasp",
"glad",
"hollow",
"motion",
"peer",
"plastic",
"root",
"screen",
"season",
"sting",
"strike",
"team",
"unlike",
"victim",
"volume",
"warn",
"weird",
"attack",
"await",
"awake",
"built",
"charm",
"crave",
"despair",
"fought",
"grant",
"grief",
"horse",
"limit",
"message",
"ripple",
"sanity",
"scatter",
"serve",
"split",
"string",
"trick",
"annoy",
"blur",
"boat",
"brave",
"clearly",
"cling",
"connect",
"fist",
"forth",
"imagination",
"iron",
"jock",
"judge",
"lesson",
"milk",
"misery",
"nail",
"naked",
"ourselves",
"poet",
"possible",
"princess",
"sail",
"size",
"snake",
"society",
"stroke",
"torture",
"toss",
"trace",
"wise",
"bloom",
"bullet",
"cell",
"check",
"cost",
"darling",
"during",
"footstep",
"fragile",
"hallway",
"hardly",
"horizon",
"invisible",
"journey",
"midnight",
"mud",
"nod",
"pause",
"relax",
"shiver",
"sudden",
"value",
"youth",
"abuse",
"admire",
"blink",
"breast",
"bruise",
"constantly",
"couple",
"creep",
"curve",
"difference",
"dumb",
"emptiness",
"gotta",
"honor",
"plain",
"planet",
"recall",
"rub",
"ship",
"slam",
"soar",
"somebody",
"tightly",
"weather",
"adore",
"approach",
"bond",
"bread",
"burst",
"candle",
"coffee",
"cousin",
"crime",
"desert",
"flutter",
"frozen",
"grand",
"heel",
"hello",
"language",
"level",
"movement",
"pleasure",
"powerful",
"random",
"rhythm",
"settle",
"silly",
"slap",
"sort",
"spoken",
"steel",
"threaten",
"tumble",
"upset",
"aside",
"awkward",
"bee",
"blank",
"board",
"button",
"card",
"carefully",
"complain",
"crap",
"deeply",
"discover",
"drag",
"dread",
"effort",
"entire",
"fairy",
"giant",
"gotten",
"greet",
"illusion",
"jeans",
"leap",
"liquid",
"march",
"mend",
"nervous",
"nine",
"replace",
"rope",
"spine",
"stole",
"terror",
"accident",
"apple",
"balance",
"boom",
"childhood",
"collect",
"demand",
"depression",
"eventually",
"faint",
"glare",
"goal",
"group",
"honey",
"kitchen",
"laid",
"limb",
"machine",
"mere",
"mold",
"murder",
"nerve",
"painful",
"poetry",
"prince",
"rabbit",
"shelter",
"shore",
"shower",
"soothe",
"stair",
"steady",
"sunlight",
"tangle",
"tease",
"treasure",
"uncle",
"begun",
"bliss",
"canvas",
"cheer",
"claw",
"clutch",
"commit",
"crimson",
"crystal",
"delight",
"doll",
"existence",
"express",
"fog",
"football",
"gay",
"goose",
"guard",
"hatred",
"illuminate",
"mass",
"math",
"mourn",
"rich",
"rough",
"skip",
"stir",
"student",
"style",
"support",
"thorn",
"tough",
"yard",
"yearn",
"yesterday",
"advice",
"appreciate",
"autumn",
"bank",
"beam",
"bowl",
"capture",
"carve",
"collapse",
"confusion",
"creation",
"dove",
"feather",
"girlfriend",
"glory",
"government",
"harsh",
"hop",
"inner",
"loser",
"moonlight",
"neighbor",
"neither",
"peach",
"pig",
"praise",
"screw",
"shield",
"shimmer",
"sneak",
"stab",
"subject",
"throughout",
"thrown",
"tower",
"twirl",
"wow",
"army",
"arrive",
"bathroom",
"bump",
"cease",
"cookie",
"couch",
"courage",
"dim",
"guilt",
"howl",
"hum",
"husband",
"insult",
"led",
"lunch",
"mock",
"mostly",
"natural",
"nearly",
"needle",
"nerd",
"peaceful",
"perfection",
"pile",
"price",
"remove",
"roam",
"sanctuary",
"serious",
"shiny",
"shook",
"sob",
"stolen",
"tap",
"vain",
"void",
"warrior",
"wrinkle",
"affection",
"apologize",
"blossom",
"bounce",
"bridge",
"cheap",
"crumble",
"decision",
"descend",
"desperately",
"dig",
"dot",
"flip",
"frighten",
"heartbeat",
"huge",
"lazy",
"lick",
"odd",
"opinion",
"process",
"puzzle",
"quietly",
"retreat",
"score",
"sentence",
"separate",
"situation",
"skill",
"soak",
"square",
"stray",
"taint",
"task",
"tide",
"underneath",
"veil",
"whistle",
"anywhere",
"bedroom",
"bid",
"bloody",
"burden",
"careful",
"compare",
"concern",
"curtain",
"decay",
"defeat",
"describe",
"double",
"dreamer",
"driver",
"dwell",
"evening",
"flare",
"flicker",
"grandma",
"guitar",
"harm",
"horrible",
"hungry",
"indeed",
"lace",
"melody",
"monkey",
"nation",
"object",
"obviously",
"rainbow",
"salt",
"scratch",
"shown",
"shy",
"stage",
"stun",
"third",
"tickle",
"useless",
"weakness",
"worship",
"worthless",
"afternoon",
"beard",
"boyfriend",
"bubble",
"busy",
"certain",
"chin",
"concrete",
"desk",
"diamond",
"doom",
"drawn",
"due",
"felicity",
"freeze",
"frost",
"garden",
"glide",
"harmony",
"hopefully",
"hunt",
"jealous",
"lightning",
"mama",
"mercy",
"peel",
"physical",
"position",
"pulse",
"punch",
"quit",
"rant",
"respond",
"salty",
"sane",
"satisfy",
"savior",
"sheep",
"slept",
"social",
"sport",
"tuck",
"utter",
"valley",
"wolf",
"aim",
"alas",
"alter",
"arrow",
"awaken",
"beaten",
"belief",
"brand",
"ceiling",
"cheese",
"clue",
"confidence",
"connection",
"daily",
"disguise",
"eager",
"erase",
"essence",
"everytime",
"expression",
"fan",
"flag",
"flirt",
"foul",
"fur",
"giggle",
"glorious",
"ignorance",
"law",
"lifeless",
"measure",
"mighty",
"muse",
"north",
"opposite",
"paradise",
"patience",
"patient",
"pencil",
"petal",
"plate",
"ponder",
"possibly",
"practice",
"slice",
"spell",
"stock",
"strife",
"strip",
"suffocate",
"suit",
"tender",
"tool",
"trade",
"velvet",
"verse",
"waist",
"witch",
"aunt",
"bench",
"bold",
"cap",
"certainly",
"click",
"companion",
"creator",
"dart",
"delicate",
"determine",
"dish",
"dragon",
"drama",
"drum",
"dude",
"everybody",
"feast",
"forehead",
"former",
"fright",
"fully",
"gas",
"hook",
"hurl",
"invite",
"juice",
"manage",
"moral",
"possess",
"raw",
"rebel",
"royal",
"scale",
"scary",
"several",
"slight",
"stubborn",
"swell",
"talent",
"tea",
"terrible",
"thread",
"torment",
"trickle",
"usually",
"vast",
"violence",
"weave",
"acid",
"agony",
"ashamed",
"awe",
"belly",
"blend",
"blush",
"character",
"cheat",
"common",
"company",
"coward",
"creak",
"danger",
"deadly",
"defense",
"define",
"depend",
"desperate",
"destination",
"dew",
"duck",
"dusty",
"embarrass",
"engine",
"example",
"explore",
"foe",
"freely",
"frustrate",
"generation",
"glove",
"guilty",
"health",
"hurry",
"idiot",
"impossible",
"inhale",
"jaw",
"kingdom",
"mention",
"mist",
"moan",
"mumble",
"mutter",
"observe",
"ode",
"pathetic",
"pattern",
"pie",
"prefer",
"puff",
"rape",
"rare",
"revenge",
"rude",
"scrape",
"spiral",
"squeeze",
"strain",
"sunset",
"suspend",
"sympathy",
"thigh",
"throne",
"total",
"unseen",
"weapon",
"weary"
]
n = 1626
# Note about US patent no 5892470: Here each word does not represent a given digit.
# Instead, the digit represented by a word is variable, it depends on the previous word.
def mn_encode( message ):
assert len(message) % 8 == 0
out = []
for i in range(len(message)//8):
word = message[8*i:8*i+8]
x = int(word, 16)
w1 = (x%n)
w2 = ((x//n) + w1)%n
w3 = ((x//n//n) + w2)%n
out += [ words[w1], words[w2], words[w3] ]
return out
def mn_decode( wlist ):
out = ''
for i in range(len(wlist)//3):
word1, word2, word3 = wlist[3*i:3*i+3]
w1 = words.index(word1)
w2 = (words.index(word2))%n
w3 = (words.index(word3))%n
x = w1 +n*((w2-w1)%n) +n*n*((w3-w2)%n)
out += '%08x'%x
return out
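
# Illustrative round-trip check (not part of the original script): each 8-hex-digit
# (32-bit) chunk maps to three words, and mn_decode inverts mn_encode, e.g.
#   seed = '89012345aabbccdd'                     # any lowercase hex string, length a multiple of 8
#   assert mn_decode(mn_encode(seed)) == seed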
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
print('I need arguments: a hex string to encode, or a list of words to decode')
elif len(sys.argv) == 2:
print(' '.join(mn_encode(sys.argv[1])))
else:
print(mn_decode(sys.argv[1:]))
|
the-stack_106_31618 | import json
import random
def dropentities(category, num):
with open("../data/flatontology.json", 'r') as f:
flatontology = json.load(f)
categorypath = flatontology[category]["fullpath"]
with open("../data/" + categorypath + "fileindex.json", 'r') as f:
fileindex = json.load(f)
    for key in random.sample(list(fileindex.keys()), num):
        del fileindex[key]
    # write the pruned index back to disk; without this the deletions above have no effect
    # (assumes overwriting the same fileindex.json is the intended behaviour)
    with open("../data/" + categorypath + "fileindex.json", 'w') as f:
        json.dump(fileindex, f)
if __name__ == "__main__":
dropentities("CareerStation", 10)
|
the-stack_106_31619 | import os
import datetime
import json
import logging
from unittest import TestCase
from contextlib import contextmanager
os.environ['REDASH_REDIS_URL'] = os.environ.get('REDASH_REDIS_URL', "redis://localhost:6379/0").replace("/0", "/5")
# Use different url for Celery to avoid DB being cleaned up:
os.environ['REDASH_CELERY_BROKER'] = os.environ.get('REDASH_REDIS_URL', "redis://localhost:6379/0").replace("/5", "/6")
# Dummy values for oauth login
os.environ['REDASH_GOOGLE_CLIENT_ID'] = "dummy"
os.environ['REDASH_GOOGLE_CLIENT_SECRET'] = "dummy"
os.environ['REDASH_MULTI_ORG'] = "true"
from redash import create_app
from redash import redis_connection
from redash.models import db
from redash.utils import json_dumps
from tests.factories import Factory, user_factory
logging.disable(logging.INFO)
logging.getLogger("metrics").setLevel("ERROR")
def authenticate_request(c, user):
with c.session_transaction() as sess:
sess['user_id'] = user.id
@contextmanager
def authenticated_user(c, user=None):
if not user:
user = user_factory.create()
db.session.commit()
authenticate_request(c, user)
yield user
class BaseTestCase(TestCase):
def setUp(self):
self.app = create_app()
self.db = db
self.app.config['TESTING'] = True
self.app.config['SERVER_NAME'] = 'localhost'
self.app_ctx = self.app.app_context()
self.app_ctx.push()
db.session.close()
db.drop_all()
db.create_all()
self.factory = Factory()
self.client = self.app.test_client()
def tearDown(self):
db.session.remove()
db.get_engine(self.app).dispose()
self.app_ctx.pop()
redis_connection.flushdb()
def make_request(self, method, path, org=None, user=None, data=None,
is_json=True, follow_redirects=False):
if user is None:
user = self.factory.user
if org is None:
org = self.factory.org
if org is not False:
path = "/{}{}".format(org.slug, path)
if user:
authenticate_request(self.client, user)
method_fn = getattr(self.client, method.lower())
headers = {}
if data and is_json:
data = json_dumps(data)
if is_json:
content_type = 'application/json'
else:
content_type = None
response = method_fn(
path,
data=data,
headers=headers,
content_type=content_type,
follow_redirects=follow_redirects,
)
if response.data and is_json:
response.json = json.loads(response.data)
return response
def get_request(self, path, org=None, headers=None):
if org:
path = "/{}{}".format(org.slug, path)
return self.client.get(path, headers=headers)
def post_request(self, path, data=None, org=None, headers=None):
if org:
path = "/{}{}".format(org.slug, path)
return self.client.post(path, data=data, headers=headers)
def assertResponseEqual(self, expected, actual):
for k, v in expected.iteritems():
if isinstance(v, datetime.datetime) or isinstance(actual[k], datetime.datetime):
continue
if isinstance(v, list):
continue
if isinstance(v, dict):
self.assertResponseEqual(v, actual[k])
continue
self.assertEqual(v, actual[k], "{} not equal (expected: {}, actual: {}).".format(k, v, actual[k]))
|
the-stack_106_31620 | import numpy as np
class RingBuffer(object):
def __init__(self, maxlen, shape, dtype='float32'):
self.maxlen = maxlen
self.start = 0
self.length = 0
self.data = np.zeros((maxlen,) + shape).astype(dtype)
def __len__(self):
return self.length
def __getitem__(self, idx):
if idx < 0 or idx >= self.length:
raise KeyError()
return self.data[(self.start + idx) % self.maxlen]
def get_batch(self, idxs):
return self.data[(self.start + idxs) % self.maxlen]
def append(self, v):
if self.length < self.maxlen:
# We have space, simply increase the length.
self.length += 1
elif self.length == self.maxlen:
# No space, "remove" the first item.
self.start = (self.start + 1) % self.maxlen
else:
# This should never happen.
raise RuntimeError()
self.data[(self.start + self.length - 1) % self.maxlen] = v
def extend(self, v):
if isinstance(v, list):
v = np.array(v)
if self.length < self.maxlen:
# We have space, simply increase the length.
self.length += v.shape[0]
elif self.length == self.maxlen:
# No space, "remove" the first item.
self.start = (self.start + v.shape[0]) % self.maxlen
else:
# This should never happen.
raise RuntimeError()
start = (self.start + self.length - v.shape[0]) % self.maxlen
if start + v.shape[0] < self.maxlen:
self.data[start:start + v.shape[0], :] = v.reshape(v.shape[0], -1)
else:
# import IPython; IPython.embed(); import sys; sys.exit(0)
self.data[start:, :] = v.reshape(v.shape[0], -1)[:(self.maxlen - start),:]
self.data[:v.shape[0] - (self.maxlen - start), :] = v.reshape(v.shape[0], -1)[(self.maxlen - start):,:]
def array_min2d(x):
x = np.array(x)
if x.ndim >= 2:
return x
return x.reshape(-1, 1)
class Memory(object):
def __init__(self, limit, action_shape, observation_shape):
self.limit = limit
self.observations0 = RingBuffer(limit, shape=observation_shape)
self.actions = RingBuffer(limit, shape=action_shape)
self.rewards = RingBuffer(limit, shape=(1,))
self.terminals1 = RingBuffer(limit, shape=(1,))
self.observations1 = RingBuffer(limit, shape=observation_shape)
def sample(self, batch_size):
# Draw such that we always have a proceeding element.
batch_idxs = np.random.randint(self.nb_entries - 2, size=batch_size)
obs0_batch = self.observations0.get_batch(batch_idxs)
obs1_batch = self.observations1.get_batch(batch_idxs)
action_batch = self.actions.get_batch(batch_idxs)
reward_batch = self.rewards.get_batch(batch_idxs)
terminal1_batch = self.terminals1.get_batch(batch_idxs)
result = {
'obs0': array_min2d(obs0_batch),
'obs1': array_min2d(obs1_batch),
'rewards': array_min2d(reward_batch),
'actions': array_min2d(action_batch),
'terminals1': array_min2d(terminal1_batch),
}
return result
def append(self, obs0, action, reward, obs1, terminal1, training=True):
if not training:
return
self.observations0.extend(obs0)
self.actions.extend(action)
self.rewards.extend(reward)
self.observations1.extend(obs1)
self.terminals1.extend(terminal1)
@property
def nb_entries(self):
return len(self.observations0)
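
# Illustrative usage sketch (not part of the original module): store batched transitions
# from a rollout and, once enough transitions have accumulated, sample a training batch.
# The shapes below are assumptions made for this example only.
#
#   memory = Memory(limit=1000000, action_shape=(2,), observation_shape=(4,))
#   memory.append(obs0=np.zeros((1, 4)), action=np.zeros((1, 2)), reward=[[0.0]],
#                 obs1=np.zeros((1, 4)), terminal1=[[0.0]])
#   batch = memory.sample(batch_size=64)   # dict with 'obs0', 'obs1', 'actions', 'rewards', 'terminals1'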
class ExpertMemory(object):
def __init__(self, limit, action_shape, observation_shape):
self.limit = limit
self.observations0 = RingBuffer(limit, shape=observation_shape)
self.actions = RingBuffer(limit, shape=action_shape)
self.values = RingBuffer(limit, shape=(1,))
def sample(self, batch_size):
# Draw such that we always have a proceeding element.
batch_idxs = np.random.randint(self.nb_entries - 2, size=batch_size)
obs0_batch = self.observations0.get_batch(batch_idxs)
action_batch = self.actions.get_batch(batch_idxs)
value_batch = self.values.get_batch(batch_idxs)
result = {
'obs0': array_min2d(obs0_batch),
'values': array_min2d(value_batch),
'actions': array_min2d(action_batch),
}
return result
def append(self, obs0, action, value, training=True):
if not training:
return
self.observations0.extend(obs0)
self.actions.extend(action)
self.values.extend(value)
@property
def nb_entries(self):
return len(self.observations0)
class ExpertActorMemory(object):
def __init__(self, limit, action_shape, observation_shape):
self.limit = limit
self.observations0 = RingBuffer(limit, shape=observation_shape)
self.actions = RingBuffer(limit, shape=action_shape)
def sample(self, batch_size):
# Draw such that we always have a proceeding element.
batch_idxs = np.random.randint(self.nb_entries - 2, size=batch_size)
obs0_batch = self.observations0.get_batch(batch_idxs)
action_batch = self.actions.get_batch(batch_idxs)
result = {
'obs0': array_min2d(obs0_batch),
'actions': array_min2d(action_batch),
}
return result
def append(self, obs0, action, training=True):
if not training:
return
self.observations0.extend(obs0)
self.actions.extend(action)
@property
def nb_entries(self):
return len(self.observations0)
|
the-stack_106_31622 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn.functional as F
from pytext.config import ConfigBase
from pytext.config.component import Component, ComponentType
from pytext.utils import loss as loss_utils, precision
from pytext.utils.cuda import FloatTensor
from torch import nn
class Loss(Component):
"""Base class for loss functions"""
__COMPONENT_TYPE__ = ComponentType.LOSS
def __init__(self, config=None, *args, **kwargs):
super().__init__(config)
def __call__(self, logit, targets, reduce=True):
raise NotImplementedError
class CrossEntropyLoss(Loss):
def __init__(self, config, ignore_index=-100, weight=None, *args, **kwargs):
self.ignore_index = ignore_index
self.weight = weight
def __call__(self, logits, targets, reduce=True):
# Don't change to F.cross_entropy() because @barlaso suggested not doing so.
# There's some wisdom from fairseq folks that it's the preferred way.
# Needs more testing before we can change to using F.cross_entropy().
return F.nll_loss(
F.log_softmax(logits, 1, dtype=torch.float32),
targets,
weight=self.weight,
ignore_index=self.ignore_index,
reduction="mean" if reduce else "none",
)
class NLLLoss(Loss):
def __init__(self, config, ignore_index=-100, weight=None, *args, **kwargs):
self.ignore_index = ignore_index
self.weight = weight
def __call__(self, log_probs, targets, reduce=True):
return F.nll_loss(
log_probs,
targets,
ignore_index=self.ignore_index,
reduction="elementwise_mean" if reduce else "none",
weight=self.weight,
)
class BinaryCrossEntropyLoss(Loss):
class Config(ConfigBase):
reweight_negative: bool = True
reduce: bool = True
def __call__(self, logits, targets, reduce=True):
"""
Computes 1-vs-all binary cross entropy loss for multiclass
classification.
"""
# Converts targets to one-hot representation. Dim: [batch, n_classes]
one_hot_targets = (
FloatTensor(targets.size(0), logits.size(1))
.zero_()
.scatter_(1, targets.unsqueeze(1).data, 1)
)
"""
        `F.binary_cross_entropy` or `torch.nn.BCELoss` requires the
        output of the previous function to already be a FloatTensor.
"""
# This weighting applies uniform class weights.
# examples_per_class = one_hot_target.sum(0).clamp(min=1)
# total_positive = examples_per_class.sum()
# weights = total_positive.unsqueeze(0) / examples_per_class
loss = F.binary_cross_entropy_with_logits(
precision.maybe_float(logits), one_hot_targets, reduction="none"
)
if self.config.reweight_negative:
# This makes sure we have same weights for all negative classes and
# single positive class. Weight is 1 for the correct class and
# 1 / (n - 1) for other ones.
weights = one_hot_targets + (1.0 - one_hot_targets) / max(
1, one_hot_targets.size(1) - 1.0
)
loss = loss * weights
return loss.sum(1).mean() if reduce else loss.sum(1)
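
# Illustrative usage sketch (not part of the original module): the loss consumes raw logits
# of shape [batch, n_classes] and integer class targets of shape [batch]. Constructing it
# directly from its Config, as below, is an assumption for the example; in PyText, losses
# are normally built through the component/config machinery.
#
#   loss_fn = BinaryCrossEntropyLoss(BinaryCrossEntropyLoss.Config())
#   loss = loss_fn(torch.randn(8, 5), torch.randint(0, 5, (8,)))   # scalar mean loss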
class AUCPRHingeLoss(nn.Module, Loss):
"""area under the precision-recall curve loss,
Reference: "Scalable Learning of Non-Decomposable Objectives", Section 5 \
TensorFlow Implementation: \
https://github.com/tensorflow/models/tree/master/research/global_objectives\
"""
class Config(ConfigBase):
"""
Attributes:
precision_range_lower (float): the lower range of precision values over
which to compute AUC. Must be nonnegative, `\leq precision_range_upper`,
                and `\leq 1.0`.
precision_range_upper (float): the upper range of precision values over
which to compute AUC. Must be nonnegative, `\geq precision_range_lower`,
                and `\leq 1.0`.
num_classes (int): number of classes(aka labels)
num_anchors (int): The number of grid points used to approximate the
Riemann sum.
"""
precision_range_lower: float = 0.0
precision_range_upper: float = 1.0
num_classes: int = 1
num_anchors: int = 20
def __init__(self, config, weights=None, *args, **kwargs):
"""Args:
config: Config containing `precision_range_lower`, `precision_range_upper`,
`num_classes`, `num_anchors`
"""
nn.Module.__init__(self)
Loss.__init__(self, config)
self.num_classes = self.config.num_classes
self.num_anchors = self.config.num_anchors
self.precision_range = (
self.config.precision_range_lower,
self.config.precision_range_upper,
)
# Create precision anchor values and distance between anchors.
        # corresponding to [alpha_t] and [delta_t] in the paper.
# precision_values: 1D `Tensor` of shape [K], where `K = num_anchors`
# delta: Scalar (since we use equal distance between anchors)
self.precision_values, self.delta = loss_utils.range_to_anchors_and_delta(
self.precision_range, self.num_anchors
)
# notation is [b_k] in paper, Parameter of shape [C, K]
# where `C = number of classes` `K = num_anchors`
self.biases = nn.Parameter(
FloatTensor(self.config.num_classes, self.config.num_anchors).zero_()
)
self.lambdas = nn.Parameter(
FloatTensor(self.config.num_classes, self.config.num_anchors).data.fill_(
1.0
)
)
def forward(self, logits, targets, reduce=True, size_average=True, weights=None):
"""
Args:
logits: Variable :math:`(N, C)` where `C = number of classes`
targets: Variable :math:`(N)` where each value is
`0 <= targets[i] <= C-1`
weights: Coefficients for the loss. Must be a `Tensor` of shape
[N] or [N, C], where `N = batch_size`, `C = number of classes`.
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. However, if the field
sizeAverage is set to False, the losses are instead summed
for each minibatch. Default: ``True``
reduce (bool, optional): By default, the losses are averaged or summed over
observations for each minibatch depending on size_average. When reduce
is False, returns a loss per input/target element instead and ignores
size_average. Default: True
"""
C = 1 if logits.dim() == 1 else logits.size(1)
if self.num_classes != C:
raise ValueError(
"num classes is %d while logits width is %d" % (self.num_classes, C)
)
labels, weights = AUCPRHingeLoss._prepare_labels_weights(
logits, targets, weights=weights
)
# Lagrange multipliers
# Lagrange multipliers are required to be nonnegative.
# Their gradient is reversed so that they are maximized
# (rather than minimized) by the optimizer.
# 1D `Tensor` of shape [K], where `K = num_anchors`
lambdas = loss_utils.lagrange_multiplier(self.lambdas)
# print("lambdas: {}".format(lambdas))
# A `Tensor` of Shape [N, C, K]
hinge_loss = loss_utils.weighted_hinge_loss(
labels.unsqueeze(-1),
logits.unsqueeze(-1) - self.biases,
positive_weights=1.0 + lambdas * (1.0 - self.precision_values),
negative_weights=lambdas * self.precision_values,
)
# 1D tensor of shape [C]
class_priors = loss_utils.build_class_priors(labels, weights=weights)
# lambda_term: Tensor[C, K]
# according to paper, lambda_term = lambda * (1 - precision) * |Y^+|
        # where |Y^+| is the number of positive examples = N * class_priors
lambda_term = class_priors.unsqueeze(-1) * (
lambdas * (1.0 - self.precision_values)
)
per_anchor_loss = weights.unsqueeze(-1) * hinge_loss - lambda_term
# Riemann sum over anchors, and normalized by precision range
# loss: Tensor[N, C]
loss = per_anchor_loss.sum(2) * self.delta
loss /= self.precision_range[1] - self.precision_range[0]
if not reduce:
return loss
elif size_average:
return loss.mean()
else:
return loss.sum()
@staticmethod
def _prepare_labels_weights(logits, targets, weights=None):
"""
Args:
logits: Variable :math:`(N, C)` where `C = number of classes`
targets: Variable :math:`(N)` where each value is
`0 <= targets[i] <= C-1`
weights: Coefficients for the loss. Must be a `Tensor` of shape
[N] or [N, C], where `N = batch_size`, `C = number of classes`.
Returns:
labels: Tensor of shape [N, C], one-hot representation
weights: Tensor of shape broadcastable to labels
"""
N, C = logits.size()
# Converts targets to one-hot representation. Dim: [N, C]
labels = FloatTensor(N, C).zero_().scatter(1, targets.unsqueeze(1).data, 1)
if weights is None:
weights = FloatTensor(N).data.fill_(1.0)
if weights.dim() == 1:
weights.unsqueeze_(-1)
return labels, weights
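
# Illustrative usage sketch (not part of the original module): because the loss keeps
# per-anchor biases and Lagrange multipliers as nn.Parameters, it should be constructed
# once and its parameters added to the optimizer alongside the model's. Shapes, values and
# the direct-from-Config construction are assumptions for this example only.
#
#   loss_fn = AUCPRHingeLoss(AUCPRHingeLoss.Config(num_classes=3, num_anchors=20))
#   optimizer = torch.optim.SGD(list(model.parameters()) + list(loss_fn.parameters()), lr=0.1)
#   loss = loss_fn(logits, targets)   # logits: [N, 3] floats, targets: [N] ints in {0, 1, 2}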
class KLDivergenceBCELoss(Loss):
class Config(ConfigBase):
temperature: float = 1.0
hard_weight: float = 0.0
def __init__(self, config, ignore_index=-100, weight=None, *args, **kwargs):
assert 0.0 <= config.hard_weight < 1.0
self.ignore_index = ignore_index
self.weight = weight
self.t = config.temperature
self.hard_weight = config.hard_weight
def __call__(self, logits, targets, reduce=True):
"""
Computes Kullback-Leibler divergence loss for multiclass classification
probability distribution computed by BinaryCrossEntropyLoss loss
"""
hard_targets, _, soft_targets_logits = targets
# we clamp the probability between (1e-20, 1 - 1e-20) to avoid log(0) problem
# in the calculation of KLDivergence
soft_targets = F.sigmoid(FloatTensor(soft_targets_logits) / self.t).clamp(
1e-20, 1 - 1e-20
)
probs = F.sigmoid(logits / self.t).clamp(1e-20, 1 - 1e-20)
probs_neg = probs.neg().add(1).clamp(1e-20, 1 - 1e-20)
soft_targets_neg = soft_targets.neg().add(1).clamp(1e-20, 1 - 1e-20)
if self.weight is not None:
soft_loss = (
F.kl_div(probs.log(), soft_targets, reduction="none") * self.weight
+ F.kl_div(probs_neg.log(), soft_targets_neg, reduction="none")
* self.weight
)
if reduce:
soft_loss = soft_loss.mean()
else:
soft_loss = F.kl_div(
probs.log(), soft_targets, reduction="mean" if reduce else "none"
) + F.kl_div(
probs_neg.log(),
soft_targets_neg,
reduction="mean" if reduce else "none",
)
soft_loss *= self.t ** 2 # see https://arxiv.org/pdf/1503.02531.pdf
hard_loss = 0.0
if self.hard_weight > 0.0:
one_hot_targets = (
FloatTensor(hard_targets.size(0), logits.size(1))
.zero_()
.scatter_(1, hard_targets.unsqueeze(1).data, 1)
)
hard_loss = F.binary_cross_entropy_with_logits(
logits,
one_hot_targets,
reduction="mean" if reduce else "none",
weight=self.weight,
)
return (1.0 - self.hard_weight) * soft_loss + self.hard_weight * hard_loss
class KLDivergenceCELoss(Loss):
class Config(ConfigBase):
temperature: float = 1.0
hard_weight: float = 0.0
def __init__(self, config, ignore_index=-100, weight=None, *args, **kwargs):
# ignore_index not easily added to kl_div loss, don't support this until needed
assert ignore_index < 0
assert 0.0 <= config.hard_weight < 1.0
self.weight = weight
self.t = config.temperature
self.hard_weight = config.hard_weight
def __call__(self, logits, targets, reduce=True):
"""
Computes Kullback-Leibler divergence loss for multiclass classification
probability distribution computed by CrossEntropyLoss loss
"""
hard_targets, _, soft_targets_logits = targets
soft_targets = F.softmax(FloatTensor(soft_targets_logits) / self.t, dim=1)
soft_targets = soft_targets.clamp(1e-10, 1 - 1e-10)
log_probs = F.log_softmax(logits / self.t, 1)
if self.weight is not None:
soft_loss = (
F.kl_div(log_probs, soft_targets, reduction="none") * self.weight
)
if reduce:
soft_loss = soft_loss.mean()
else:
soft_loss = F.kl_div(
log_probs, soft_targets, reduction="mean" if reduce else "none"
)
soft_loss *= self.t ** 2 # see https://arxiv.org/pdf/1503.02531.pdf
hard_loss = 0.0
if self.hard_weight > 0.0:
hard_loss = F.cross_entropy(
logits,
hard_targets,
reduction="mean" if reduce else "none",
weight=self.weight,
)
return (1.0 - self.hard_weight) * soft_loss + self.hard_weight * hard_loss
class PairwiseRankingLoss(Loss):
"""
Given embeddings for a query, positive response and negative response
computes pairwise ranking hinge loss
"""
class Config(ConfigBase):
margin: float = 1.0
@staticmethod
def get_similarities(embeddings):
pos_embed, neg_embed, query_embed = embeddings
pos_similarity = F.cosine_similarity(query_embed, pos_embed)
neg_similarity = F.cosine_similarity(query_embed, neg_embed)
return pos_similarity, neg_similarity, query_embed.size(0)
def __call__(self, logits, targets, reduce=True):
pos_similarity, neg_similarity, batch_size = self.get_similarities(logits)
targets_local = FloatTensor(batch_size)
targets_local.fill_(1) # 1: pos_similarity should be higher than neg_similarity
return F.margin_ranking_loss(
pos_similarity, neg_similarity, targets_local, self.config.margin
)
class MSELoss(Loss):
"""
Mean squared error loss, for regression tasks.
"""
class Config(ConfigBase):
pass
def __call__(self, predictions, targets, reduce=True):
return F.mse_loss(predictions, targets, reduction="mean" if reduce else "none")
class LabelSmoothedCrossEntropyLoss(Loss):
class Config(ConfigBase):
beta: float = 0.1
from_logits: bool = True
def __init__(self, config, ignore_index=-100, weight=None, *args, **kwargs):
# weight values other than 1.0 gives inconsistent behavior
# Refer: https://github.com/pytorch/pytorch/issues/17577
if weight is not None:
assert torch.sum(torch.abs(weight - 1.0)) < 1e-7
self.ignore_index = ignore_index
self.weight = weight
self.beta = config.beta
self.from_logits = config.from_logits
def __call__(self, logits, targets, reduce=True):
"""
Returns the cross-entropy loss alongwith the KL divergence of the
discrete uniform distribution with the logits. Refer section 3.2 of
https://arxiv.org/pdf/1701.06548.pdf
"""
log_probs = F.log_softmax(logits, dim=1) if self.from_logits else logits
cross_entropy_loss = F.nll_loss(
log_probs,
targets,
ignore_index=self.ignore_index,
reduction="mean" if reduce else "none",
weight=self.weight,
)
# negative KL-div has an additional log(num_classes) term but ignored
# here because it doesn't contribute to optimization
label_smoothing_loss = -1 * log_probs.mean(dim=1)
if reduce:
label_smoothing_loss = torch.mean(
label_smoothing_loss[targets != self.ignore_index]
)
return (1.0 - self.beta) * cross_entropy_loss + self.beta * label_smoothing_loss
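
# Illustrative usage sketch (not part of the original module): with beta = 0.1 the result
# is 0.9 * cross-entropy + 0.1 * (mean negative log-probability over classes), which nudges
# the predicted distribution toward uniform, as in the paper referenced above.
#
#   loss_fn = LabelSmoothedCrossEntropyLoss(LabelSmoothedCrossEntropyLoss.Config(beta=0.1))
#   loss = loss_fn(logits, targets)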
|
the-stack_106_31626 | #!/usr/bin/env python
import numpy
import scipy.stats
# The Savage-Dickey estimator computation is based on the implementation by Äijö et al. (2016) available at https://github.com/tare/LuxGLM (MIT license)
def calculate_savagedickey(prior1_mean,prior1_cov,prior2_mean,prior2_cov,samples1,samples2):
samples1_mean = numpy.mean(samples1,0)
samples1_cov = numpy.cov(samples1,rowvar=0)
samples2_mean = numpy.mean(samples2,0)
samples2_cov = numpy.cov(samples2,rowvar=0)
numerator = scipy.stats.multivariate_normal.pdf(numpy.zeros(prior1_mean.shape),mean=prior1_mean-prior2_mean,cov=prior1_cov+prior2_cov)
denominator = scipy.stats.multivariate_normal.pdf(numpy.zeros(prior1_mean.shape),mean=samples1_mean-samples2_mean,cov=samples1_cov+samples2_cov)
return numerator/denominator
def calculate_savagedickey_kde(prior1_mean,prior1_cov,prior2_mean,prior2_cov,samples1,samples2):
Delta = samples1-samples2
density = scipy.stats.kde.gaussian_kde(Delta,bw_method='scott')
numerator = scipy.stats.multivariate_normal.pdf(numpy.zeros(prior1_mean.shape),mean=prior1_mean-prior2_mean,cov=prior1_cov+prior2_cov)
denominator = density.evaluate([0])[0]
return numerator/denominator
def calculate_savagedickey_kde_1d(prior_mean,prior_cov,samples):
density = scipy.stats.kde.gaussian_kde(samples.T,bw_method='scott')
numerator = scipy.stats.multivariate_normal.pdf(numpy.zeros(prior_mean.shape),mean=prior_mean,cov=prior_cov)
denominator = density.evaluate([0,0])[0]
return numerator/denominator
def calculate_savagedickey_kde_window(prior1_mean,prior1_cov,prior2_mean,prior2_cov,samples1,samples2):
#samples1 and samples2 have shape (# of dims, # of samples). prior1_mean and prior2_mean have shape (#dim,1) and prior1_cov and prior2_cov have shape (#dim,#dim)
Delta = samples1-samples2
density = scipy.stats.kde.gaussian_kde(Delta,bw_method='scott')
numerator = scipy.stats.multivariate_normal.pdf(numpy.zeros(Delta.shape[0]),mean=prior1_mean-prior2_mean,cov=prior1_cov+prior2_cov)
denominator = density.evaluate(numpy.zeros(Delta.shape[0]))
return numerator/denominator, numerator, denominator
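# A minimal usage sketch (hypothetical, illustrative values only): draw posterior
# samples of shape (# of samples, # of dims) for two conditions and compute the
# Savage-Dickey density ratio for their difference being zero.
if __name__ == '__main__':
    prior_mean = numpy.zeros(2)
    prior_cov = numpy.eye(2)
    samples1 = numpy.random.multivariate_normal(prior_mean, prior_cov, 1000)
    samples2 = numpy.random.multivariate_normal(prior_mean, prior_cov, 1000)
    print(calculate_savagedickey(prior_mean, prior_cov, prior_mean, prior_cov, samples1, samples2))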
|
the-stack_106_31628 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import oneflow.typing as tp
from util import convert_to_onnx_and_check
def test_gather_nd(test_case):
@flow.global_function()
def gather_nd():
x = flow.get_variable(
name="x",
shape=(2, 3, 4),
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
y = flow.get_variable(
name="y",
shape=(2, 3),
dtype=flow.int64,
initializer=flow.random_uniform_initializer(0, 1, flow.int64),
)
return flow.gather_nd(x, y)
convert_to_onnx_and_check(gather_nd, opset=11)
|
the-stack_106_31629 | #!/usr/bin/env python
import doctest
import logging
import math as m
import re
from networkx import DiGraph
import copy
import itertools as it
def load_input():
data = list()
with open('input.txt') as fd:
for line in fd:
value = int(line.strip())
data.append(value)
return data
def part1(data):
value = 0
ones = 0
threes = 0
end = max(data) + 3
data.append(end)
while True:
for i in range(1, 4):
value += 1
if value in data:
if i == 1:
ones += 1
if i == 3:
threes += 1
break
if value == end:
break
return ones*threes
def part2(data):
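    # Dynamic programming: combinations[v] counts the distinct adapter chains
    # from joltage value v up to the device (max(data) + 3).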
combinations = dict()
end = max(data) + 3
data.append(0)
data.append(end)
combinations[end] = 1
data.sort(reverse = True)
for value in data:
if value == end:
continue
sums = 0
for i in range(1, 4):
next_value = value + i
if next_value in data:
sums += combinations[next_value]
combinations[value] = sums
return combinations[0]
def main():
data = load_input()
result = part1(data.copy())
print("Part1: ", result)
result = part2(data.copy())
print("Part2: ", result)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
|
the-stack_106_31630 | import tensorflow as tf
import sys
sys.path.append('../')
print(sys.path)
from algos import construct_classifier
from utils.misc import increment_path
from toyexamples.synthetic_data import SquareBlock, ToyWorld
n_epochs = 10
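# Build one hyperparameter set per dpe_scalar value, sweeping it from 10**-5 to 10**1.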
hparams_list = [
{"classifier_type": "paritynn",
"dpe_scalar": 10**i,
"layersizes": [],
"inputsize": 2,
}
for i in range(-5, 2)
]
b1 = SquareBlock(0, [0,0], probpositive=0.8)
b2 = SquareBlock(1, [1,0], probpositive=0.5)
toyworld = ToyWorld()
toyworld.add_block(b1, .7)
toyworld.add_block(b2, .3)
train_dataset, validation_dataset = toyworld.dataset()
print("...dataset loaded.")
print("Launching Tensorboard.\nTo visualize, navigate to "
"http://0.0.0.0:6006/\nTo close Tensorboard,"
" press ctrl+C")
for hparams in hparams_list:
print("Starting new experiment")
with tf.Graph().as_default():
classifier = construct_classifier(hparams)
print("======= Experiment hyperparameters =======\n{}".format(
classifier.hparams))
print(tf.global_variables())
# raise RuntimeError
print("======= Training for {} epochs ===========".format(n_epochs))
classifier.train(train_dataset, epochs=n_epochs,
validation_dataset=validation_dataset)
|
the-stack_106_31633 | # // There are A cities numbered from 1 to A. You have already visited M cities, the
# // indices of which are given in an array B of M integers.
# // If a city with index i is visited, you can visit either the city with index i-1
# // (i >= 2) or the city with index i+1 (i < A) if they are not already visited.
# // Eg: if N = 5 and array M consists of [3, 4], then in the first level of moves,
# // you can either visit 2 or 5.
# // You keep visiting cities in this fashion until all the cities are not visited.
# // Find the number of ways in which you can visit all the cities modulo 10^9+7.
# // Input Format The 1st argument given is an integer A, i.e total number of cities.
# // The 2nd argument given is an integer array B, where B[i] denotes ith city you already visited.
# // Output Format Return an Integer X % (1e9 + 7), number of ways in which you can visit all the cities. Constraints 1 <= A <= 1000 1 <= M <= A 1 <= B[i] <= A
# // For Example
# // // Input: A = 5 B = [2, 5] Output: 6 Explanation: All possible ways to visit remaining cities are : 1. 1 -> 3 -> 4 2. 1 -> 4 -> 3 3. 3 -> 1 -> 4 4. 3 -> 4 -> 1 5. 4 -> 1 -> 3 6. 4 -> 3 -> 1
from math import factorial
class Solution:
def solve(self, A, B):
B = sorted(B)
first_gap = B[0] - 1
last_gap = A - B[-1]
gaps = [b - a - 1 for a, b in zip(B[:-1], B[1:])]
res = factorial(A - len(B))
for g in gaps:
res //= factorial(g)
if g > 0:
res *= 2 ** (g-1)
res //= factorial(first_gap)
res //= factorial(last_gap)
        return res % (10**9+7)
|
the-stack_106_31637 | # Copyright (c) 2003-2014 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino
#
# Description:
# [MS-RRP] Interface implementation
#
# Best way to learn how to use these calls is to grab the protocol standard
# so you understand what the call does, and then read the test case located
# at https://github.com/CoreSecurity/impacket/tree/master/impacket/testcases/SMB_RPC
#
# Some calls have helper functions, which makes it even easier to use.
# They are located at the end of this file.
# Helper functions start with "h"<name of the call>.
# There are test cases for them too.
#
from struct import unpack
from impacket.dcerpc.v5 import ndr
from impacket.dcerpc.v5.ndr import NDRCALL, NDR, NDRSTRUCT, NDRPOINTER, NDRUniConformantVaryingArray, NDRUniConformantArray
from impacket.dcerpc.v5.dtypes import *
from impacket import system_errors
from impacket.uuid import uuidtup_to_bin
from impacket.dcerpc.v5.enum import Enum
MSRPC_UUID_RRP = uuidtup_to_bin(('338CD001-2244-31F1-AAAA-900038001003', '1.0'))
class DCERPCSessionError(Exception):
def __init__(self, packet=None, error_code=None):
Exception.__init__(self)
self.packet = packet
if packet is not None:
self.error_code = packet['ErrorCode']
else:
self.error_code = error_code
def get_error_code(self):
return self.error_code
def get_packet(self):
return self.packet
def __str__(self):
key = self.error_code
if (key in system_errors.ERROR_MESSAGES):
error_msg_short = system_errors.ERROR_MESSAGES[key][0]
error_msg_verbose = system_errors.ERROR_MESSAGES[key][1]
return 'RRP SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
else:
return 'RRP SessionError: unknown error code: 0x%x' % (self.error_code)
################################################################################
# CONSTANTS
################################################################################
# 2.2.2 PREGISTRY_SERVER_NAME
PREGISTRY_SERVER_NAME = PWCHAR
# 2.2.3 error_status_t
error_status_t = ULONG
# 2.2.5 RRP_UNICODE_STRING
RRP_UNICODE_STRING = RPC_UNICODE_STRING
PRRP_UNICODE_STRING = PRPC_UNICODE_STRING
# 2.2.4 REGSAM
REGSAM = ULONG
KEY_QUERY_VALUE = 0x00000001
KEY_SET_VALUE = 0x00000002
KEY_CREATE_SUB_KEY = 0x00000004
KEY_ENUMERATE_SUB_KEYS = 0x00000008
KEY_CREATE_LINK = 0x00000020
KEY_WOW64_64KEY = 0x00000100
KEY_WOW64_32KEY = 0x00000200
REG_BINARY = 3
REG_DWORD = 4
REG_DWORD_LITTLE_ENDIAN = 4
REG_DWORD_BIG_ENDIAN = 5
REG_EXPAND_SZ = 2
REG_LINK = 6
REG_MULTI_SZ = 7
REG_NONE = 0
REG_QWORD = 11
REG_QWORD_LITTLE_ENDIAN = 11
REG_SZ = 1
# 3.1.5.7 BaseRegCreateKey (Opnum 6)
REG_CREATED_NEW_KEY = 0x00000001
REG_OPENED_EXISTING_KEY = 0x00000002
# 3.1.5.19 BaseRegRestoreKey (Opnum 19)
# Flags
REG_WHOLE_HIVE_VOLATILE = 0x00000001
REG_REFRESH_HIVE = 0x00000002
REG_NO_LAZY_FLUSH = 0x00000004
REG_FORCE_RESTORE = 0x00000008
################################################################################
# STRUCTURES
################################################################################
# 2.2.1 RPC_HKEY
class RPC_HKEY(NDRSTRUCT):
structure = (
('context_handle_attributes',ULONG),
('context_handle_uuid',UUID),
)
def __init__(self, data=None,isNDR64=False):
NDRSTRUCT.__init__(self, data, isNDR64)
self['context_handle_uuid'] = '\x00' * 20
# 2.2.6 RVALENT
class RVALENT(NDRSTRUCT):
structure = (
('ve_valuename',PRRP_UNICODE_STRING),
('ve_valuelen',DWORD),
('ve_valueptr',DWORD),
('ve_type',DWORD),
)
class RVALENT_ARRAY(NDRUniConformantVaryingArray):
item = RVALENT
# 2.2.9 RPC_SECURITY_DESCRIPTOR
class BYTE_ARRAY(NDRUniConformantVaryingArray):
pass
class PBYTE_ARRAY(NDRPOINTER):
referent = (
('Data', BYTE_ARRAY),
)
class RPC_SECURITY_DESCRIPTOR(NDRSTRUCT):
structure = (
('lpSecurityDescriptor',PBYTE_ARRAY),
('cbInSecurityDescriptor',DWORD),
('cbOutSecurityDescriptor',DWORD),
)
# 2.2.8 RPC_SECURITY_ATTRIBUTES
class RPC_SECURITY_ATTRIBUTES(NDRSTRUCT):
structure = (
('nLength',DWORD),
('RpcSecurityDescriptor',RPC_SECURITY_DESCRIPTOR),
('bInheritHandle',BOOLEAN),
)
class PRPC_SECURITY_ATTRIBUTES(NDRPOINTER):
referent = (
('Data', RPC_SECURITY_ATTRIBUTES),
)
################################################################################
# RPC CALLS
################################################################################
# 3.1.5.1 OpenClassesRoot (Opnum 0)
class OpenClassesRoot(NDRCALL):
opnum = 0
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenClassesRootResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.2 OpenCurrentUser (Opnum 1)
class OpenCurrentUser(NDRCALL):
opnum = 1
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenCurrentUserResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.3 OpenLocalMachine (Opnum 2)
class OpenLocalMachine(NDRCALL):
opnum = 2
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenLocalMachineResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.4 OpenPerformanceData (Opnum 3)
class OpenPerformanceData(NDRCALL):
opnum = 3
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenPerformanceDataResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.5 OpenUsers (Opnum 4)
class OpenUsers(NDRCALL):
opnum = 4
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenUsersResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.6 BaseRegCloseKey (Opnum 5)
class BaseRegCloseKey(NDRCALL):
opnum = 5
structure = (
('hKey', RPC_HKEY),
)
class BaseRegCloseKeyResponse(NDRCALL):
structure = (
('hKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.7 BaseRegCreateKey (Opnum 6)
class BaseRegCreateKey(NDRCALL):
opnum = 6
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
('lpClass', RRP_UNICODE_STRING),
('dwOptions', DWORD),
('samDesired', REGSAM),
('lpSecurityAttributes', PRPC_SECURITY_ATTRIBUTES),
('lpdwDisposition', LPULONG),
)
class BaseRegCreateKeyResponse(NDRCALL):
structure = (
('phkResult', RPC_HKEY),
('lpdwDisposition', LPULONG),
('ErrorCode', error_status_t),
)
# 3.1.5.8 BaseRegDeleteKey (Opnum 7)
class BaseRegDeleteKey(NDRCALL):
opnum = 7
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
)
class BaseRegDeleteKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.9 BaseRegDeleteValue (Opnum 8)
class BaseRegDeleteValue(NDRCALL):
opnum = 8
structure = (
('hKey', RPC_HKEY),
('lpValueName', RRP_UNICODE_STRING),
)
class BaseRegDeleteValueResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.10 BaseRegEnumKey (Opnum 9)
class BaseRegEnumKey(NDRCALL):
opnum = 9
structure = (
('hKey', RPC_HKEY),
('dwIndex', DWORD),
('lpNameIn', RRP_UNICODE_STRING),
('lpClassIn', PRRP_UNICODE_STRING),
('lpftLastWriteTime', PFILETIME),
)
class BaseRegEnumKeyResponse(NDRCALL):
structure = (
('lpNameOut', RRP_UNICODE_STRING),
('lplpClassOut', PRRP_UNICODE_STRING),
('lpftLastWriteTime', PFILETIME),
('ErrorCode', error_status_t),
)
# 3.1.5.11 BaseRegEnumValue (Opnum 10)
class BaseRegEnumValue(NDRCALL):
opnum = 10
structure = (
('hKey', RPC_HKEY),
('dwIndex', DWORD),
('lpValueNameIn', RRP_UNICODE_STRING),
('lpType', LPULONG),
('lpData', PBYTE_ARRAY),
('lpcbData', LPULONG),
('lpcbLen', LPULONG),
)
class BaseRegEnumValueResponse(NDRCALL):
structure = (
('lpValueNameOut', RRP_UNICODE_STRING),
('lpType', LPULONG),
('lpData', PBYTE_ARRAY),
('lpcbData', LPULONG),
('lpcbLen', LPULONG),
('ErrorCode', error_status_t),
)
# 3.1.5.12 BaseRegFlushKey (Opnum 11)
class BaseRegFlushKey(NDRCALL):
opnum = 11
structure = (
('hKey', RPC_HKEY),
)
class BaseRegFlushKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.13 BaseRegGetKeySecurity (Opnum 12)
class BaseRegGetKeySecurity(NDRCALL):
opnum = 12
structure = (
('hKey', RPC_HKEY),
('SecurityInformation', SECURITY_INFORMATION),
('pRpcSecurityDescriptorIn', RPC_SECURITY_DESCRIPTOR),
)
class BaseRegGetKeySecurityResponse(NDRCALL):
structure = (
('pRpcSecurityDescriptorOut', RPC_SECURITY_DESCRIPTOR),
('ErrorCode', error_status_t),
)
# 3.1.5.14 BaseRegLoadKey (Opnum 13)
class BaseRegLoadKey(NDRCALL):
opnum = 13
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
('lpFile', RRP_UNICODE_STRING),
)
class BaseRegLoadKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.15 BaseRegOpenKey (Opnum 15)
class BaseRegOpenKey(NDRCALL):
opnum = 15
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
('dwOptions', DWORD),
('samDesired', REGSAM),
)
class BaseRegOpenKeyResponse(NDRCALL):
structure = (
('phkResult', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.16 BaseRegQueryInfoKey (Opnum 16)
class BaseRegQueryInfoKey(NDRCALL):
opnum = 16
structure = (
('hKey', RPC_HKEY),
('lpClassIn', RRP_UNICODE_STRING),
)
class BaseRegQueryInfoKeyResponse(NDRCALL):
structure = (
('lpClassOut', RPC_UNICODE_STRING),
('lpcSubKeys', DWORD),
('lpcbMaxSubKeyLen', DWORD),
('lpcbMaxClassLen', DWORD),
('lpcValues', DWORD),
('lpcbMaxValueNameLen', DWORD),
('lpcbMaxValueLen', DWORD),
('lpcbSecurityDescriptor', DWORD),
('lpftLastWriteTime', FILETIME),
('ErrorCode', error_status_t),
)
# 3.1.5.17 BaseRegQueryValue (Opnum 17)
class BaseRegQueryValue(NDRCALL):
opnum = 17
structure = (
('hKey', RPC_HKEY),
('lpValueName', RRP_UNICODE_STRING),
('lpType', LPULONG),
('lpData', PBYTE_ARRAY),
('lpcbData', LPULONG),
('lpcbLen', LPULONG),
)
class BaseRegQueryValueResponse(NDRCALL):
structure = (
('lpType', LPULONG),
('lpData', PBYTE_ARRAY),
('lpcbData', LPULONG),
('lpcbLen', LPULONG),
('ErrorCode', error_status_t),
)
# 3.1.5.18 BaseRegReplaceKey (Opnum 18)
class BaseRegReplaceKey(NDRCALL):
opnum = 18
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
('lpNewFile', RRP_UNICODE_STRING),
('lpOldFile', RRP_UNICODE_STRING),
)
class BaseRegReplaceKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.19 BaseRegRestoreKey (Opnum 19)
class BaseRegRestoreKey(NDRCALL):
opnum = 19
structure = (
('hKey', RPC_HKEY),
('lpFile', RRP_UNICODE_STRING),
('Flags', DWORD),
)
class BaseRegRestoreKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.20 BaseRegSaveKey (Opnum 20)
class BaseRegSaveKey(NDRCALL):
opnum = 20
structure = (
('hKey', RPC_HKEY),
('lpFile', RRP_UNICODE_STRING),
('pSecurityAttributes', PRPC_SECURITY_ATTRIBUTES),
)
class BaseRegSaveKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.21 BaseRegSetKeySecurity (Opnum 21)
class BaseRegSetKeySecurity(NDRCALL):
opnum = 21
structure = (
('hKey', RPC_HKEY),
('SecurityInformation', SECURITY_INFORMATION),
('pRpcSecurityDescriptor', RPC_SECURITY_DESCRIPTOR),
)
class BaseRegSetKeySecurityResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.22 BaseRegSetValue (Opnum 22)
class BaseRegSetValue(NDRCALL):
opnum = 22
structure = (
('hKey', RPC_HKEY),
('lpValueName', RRP_UNICODE_STRING),
('dwType', DWORD),
('lpData', NDRUniConformantArray),
('cbData', DWORD),
)
class BaseRegSetValueResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.23 BaseRegUnLoadKey (Opnum 23)
class BaseRegUnLoadKey(NDRCALL):
opnum = 23
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
)
class BaseRegUnLoadKeyResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.24 BaseRegGetVersion (Opnum 26)
class BaseRegGetVersion(NDRCALL):
opnum = 26
structure = (
('hKey', RPC_HKEY),
)
class BaseRegGetVersionResponse(NDRCALL):
structure = (
('lpdwVersion', DWORD),
('ErrorCode', error_status_t),
)
# 3.1.5.25 OpenCurrentConfig (Opnum 27)
class OpenCurrentConfig(NDRCALL):
opnum = 27
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenCurrentConfigResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.26 BaseRegQueryMultipleValues (Opnum 29)
class BaseRegQueryMultipleValues(NDRCALL):
opnum = 29
structure = (
('hKey', RPC_HKEY),
('val_listIn', RVALENT_ARRAY),
('num_vals', DWORD),
('lpvalueBuf', PBYTE_ARRAY),
('ldwTotsize', DWORD),
)
class BaseRegQueryMultipleValuesResponse(NDRCALL):
structure = (
('val_listOut', RVALENT_ARRAY),
('lpvalueBuf', PBYTE_ARRAY),
('ldwTotsize', DWORD),
('ErrorCode', error_status_t),
)
# 3.1.5.27 BaseRegSaveKeyEx (Opnum 31)
class BaseRegSaveKeyEx(NDRCALL):
opnum = 31
structure = (
('hKey', RPC_HKEY),
('lpFile', RRP_UNICODE_STRING),
('pSecurityAttributes', PRPC_SECURITY_ATTRIBUTES),
('Flags', DWORD),
)
class BaseRegSaveKeyExResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
# 3.1.5.28 OpenPerformanceText (Opnum 32)
class OpenPerformanceText(NDRCALL):
opnum = 32
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenPerformanceTextResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.29 OpenPerformanceNlsText (Opnum 33)
class OpenPerformanceNlsText(NDRCALL):
opnum = 33
structure = (
('ServerName', PREGISTRY_SERVER_NAME),
('samDesired', REGSAM),
)
class OpenPerformanceNlsTextResponse(NDRCALL):
structure = (
('phKey', RPC_HKEY),
('ErrorCode', error_status_t),
)
# 3.1.5.30 BaseRegQueryMultipleValues2 (Opnum 34)
class BaseRegQueryMultipleValues2(NDRCALL):
opnum = 34
structure = (
('hKey', RPC_HKEY),
('val_listIn', RVALENT_ARRAY),
('num_vals', DWORD),
('lpvalueBuf', PBYTE_ARRAY),
('ldwTotsize', DWORD),
)
class BaseRegQueryMultipleValues2Response(NDRCALL):
structure = (
('val_listOut', RVALENT_ARRAY),
('lpvalueBuf', PBYTE_ARRAY),
('ldwRequiredSize', DWORD),
('ErrorCode', error_status_t),
)
# 3.1.5.31 BaseRegDeleteKeyEx (Opnum 35)
class BaseRegDeleteKeyEx(NDRCALL):
opnum = 35
structure = (
('hKey', RPC_HKEY),
('lpSubKey', RRP_UNICODE_STRING),
('AccessMask', REGSAM),
('Reserved', DWORD),
)
class BaseRegDeleteKeyExResponse(NDRCALL):
structure = (
('ErrorCode', error_status_t),
)
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
0: (OpenClassesRoot, OpenClassesRootResponse),
1: (OpenCurrentUser, OpenCurrentUserResponse),
2: (OpenLocalMachine, OpenLocalMachineResponse),
3: (OpenPerformanceData, OpenPerformanceDataResponse),
4: (OpenUsers, OpenUsersResponse),
5: (BaseRegCloseKey, BaseRegCloseKeyResponse),
6: (BaseRegCreateKey, BaseRegCreateKeyResponse),
7: (BaseRegDeleteKey, BaseRegDeleteKeyResponse),
8: (BaseRegDeleteValue, BaseRegDeleteValueResponse),
9: (BaseRegEnumKey, BaseRegEnumKeyResponse),
10: (BaseRegEnumValue, BaseRegEnumValueResponse),
11: (BaseRegFlushKey, BaseRegFlushKeyResponse),
12: (BaseRegGetKeySecurity, BaseRegGetKeySecurityResponse),
13: (BaseRegLoadKey, BaseRegLoadKeyResponse),
15: (BaseRegOpenKey, BaseRegOpenKeyResponse),
16: (BaseRegQueryInfoKey, BaseRegQueryInfoKeyResponse),
17: (BaseRegQueryValue, BaseRegQueryValueResponse),
18: (BaseRegReplaceKey, BaseRegReplaceKeyResponse),
19: (BaseRegRestoreKey, BaseRegRestoreKeyResponse),
20: (BaseRegSaveKey, BaseRegSaveKeyResponse),
21: (BaseRegSetKeySecurity, BaseRegSetKeySecurityResponse),
22: (BaseRegSetValue, BaseRegSetValueResponse),
23: (BaseRegUnLoadKey, BaseRegUnLoadKeyResponse),
26: (BaseRegGetVersion, BaseRegGetVersionResponse),
27: (OpenCurrentConfig, OpenCurrentConfigResponse),
29: (BaseRegQueryMultipleValues, BaseRegQueryMultipleValuesResponse),
31: (BaseRegSaveKeyEx, BaseRegSaveKeyExResponse),
32: (OpenPerformanceText, OpenPerformanceTextResponse),
33: (OpenPerformanceNlsText, OpenPerformanceNlsTextResponse),
34: (BaseRegQueryMultipleValues2, BaseRegQueryMultipleValues2Response),
35: (BaseRegDeleteKeyEx, BaseRegDeleteKeyExResponse),
}
################################################################################
# HELPER FUNCTIONS
################################################################################
def checkNullString(string):
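    # Ensure the string is NUL-terminated, as the RRP unicode string parameters expect.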
if string == NULL:
return string
if string[-1:] != '\x00':
return string + '\x00'
else:
return string
def unpackValue(valueType, value):
if valueType == REG_DWORD:
retData = unpack('<L', ''.join(value))[0]
elif valueType == REG_DWORD_BIG_ENDIAN:
retData = unpack('>L', ''.join(value))[0]
elif valueType == REG_EXPAND_SZ:
retData = ''.join(value).decode('utf-16le')
elif valueType == REG_MULTI_SZ:
retData = ''.join(value).decode('utf-16le')
elif valueType == REG_QWORD:
retData = unpack('<Q', ''.join(value))[0]
elif valueType == REG_QWORD_LITTLE_ENDIAN:
        retData = unpack('<Q', ''.join(value))[0]
elif valueType == REG_SZ:
retData = ''.join(value).decode('utf-16le')
else:
retData = ''.join(value)
return retData
def hOpenClassesRoot(dce, samDesired=MAXIMUM_ALLOWED):
request = OpenClassesRoot()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hOpenCurrentUser(dce, samDesired=MAXIMUM_ALLOWED):
request = OpenCurrentUser()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hOpenLocalMachine(dce, samDesired=MAXIMUM_ALLOWED):
request = OpenLocalMachine()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hOpenPerformanceData(dce, samDesired=MAXIMUM_ALLOWED):
request = OpenPerformanceData()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hOpenUsers(dce, samDesired=MAXIMUM_ALLOWED):
request = OpenUsers()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hBaseRegCloseKey(dce, hKey):
request = BaseRegCloseKey()
request['hKey'] = hKey
return dce.request(request)
def hBaseRegCreateKey(dce, hKey, lpSubKey, lpClass=NULL, dwOptions=0x00000001, samDesired=MAXIMUM_ALLOWED, lpSecurityAttributes=NULL, lpdwDisposition=REG_CREATED_NEW_KEY):
request = BaseRegCreateKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
request['lpClass'] = checkNullString(lpClass)
request['dwOptions'] = dwOptions
request['samDesired'] = samDesired
if lpSecurityAttributes == NULL:
request['lpSecurityAttributes']['RpcSecurityDescriptor']['lpSecurityDescriptor'] = NULL
else:
request['lpSecurityAttributes'] = lpSecurityAttributes
request['lpdwDisposition'] = lpdwDisposition
return dce.request(request)
def hBaseRegDeleteKey(dce, hKey, lpSubKey):
request = BaseRegDeleteKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
return dce.request(request)
def hBaseRegEnumKey(dce, hKey, dwIndex, lpftLastWriteTime=NULL):
request = BaseRegEnumKey()
request['hKey'] = hKey
request['dwIndex'] = dwIndex
request.fields['lpNameIn'].fields['MaximumLength'] = 1024
request.fields['lpNameIn'].fields['Data'].fields['Data'].fields['MaximumCount'] = 1024 / 2
request['lpClassIn'] = ' ' * 64
request['lpftLastWriteTime'] = lpftLastWriteTime
return dce.request(request)
def hBaseRegEnumValue(dce, hKey, dwIndex):
# ToDo, check the result to see whether we need to
# have a bigger buffer for the data to receive
request = BaseRegEnumValue()
request['hKey'] = hKey
request['dwIndex'] = dwIndex
request['lpValueNameIn'] = ' ' * 128
request['lpData'] = ' ' * 128
request['lpcbData'] = 128
request['lpcbLen'] = 128
return dce.request(request)
def hBaseRegFlushKey(dce, hKey):
request = BaseRegFlushKey()
request['hKey'] = hKey
return dce.request(request)
def hBaseRegGetKeySecurity(dce, hKey, securityInformation=OWNER_SECURITY_INFORMATION):
request = BaseRegGetKeySecurity()
request['hKey'] = hKey
request['SecurityInformation'] = securityInformation
request['pRpcSecurityDescriptorIn']['lpSecurityDescriptor'] = NULL
request['pRpcSecurityDescriptorIn']['cbInSecurityDescriptor'] = 1024
return dce.request(request)
def hBaseRegLoadKey(dce, hKey, lpSubKey, lpFile):
request = BaseRegLoadKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
request['lpFile'] = checkNullString(lpFile)
return dce.request(request)
def hBaseRegUnLoadKey(dce, hKey, lpSubKey):
request = BaseRegUnLoadKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
return dce.request(request)
def hBaseRegOpenKey(dce, hKey, lpSubKey, dwOptions=0x00000001, samDesired=MAXIMUM_ALLOWED):
request = BaseRegOpenKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
request['dwOptions'] = dwOptions
request['samDesired'] = samDesired
return dce.request(request)
def hBaseRegQueryInfoKey(dce, hKey):
request = BaseRegQueryInfoKey()
request['hKey'] = hKey
# Not the cleanest way, but oh well
# Plus, Windows XP needs MaximumCount also set
request.fields['lpClassIn'].fields['MaximumLength'] = 1024
request.fields['lpClassIn'].fields['Data'].fields['Data'].fields['MaximumCount'] = 1024 / 2
return dce.request(request)
def hBaseRegQueryValue(dce, hKey, lpValueName):
# ToDo, check the result to see whether we need to
# have a bigger buffer for the data to receive
request = BaseRegQueryValue()
request['hKey'] = hKey
request['lpValueName'] = checkNullString(lpValueName)
request['lpData'] = ' ' * 512
request['lpcbData'] = 512
request['lpcbLen'] = 512
resp = dce.request(request)
# Returns
# ( dataType, data )
return resp['lpType'], unpackValue(resp['lpType'], resp['lpData'])
def hBaseRegReplaceKey(dce, hKey, lpSubKey, lpNewFile, lpOldFile):
request = BaseRegReplaceKey()
request['hKey'] = hKey
request['lpSubKey'] = checkNullString(lpSubKey)
request['lpNewFile'] = checkNullString(lpNewFile)
request['lpOldFile'] = checkNullString(lpOldFile)
return dce.request(request)
def hBaseRegRestoreKey(dce, hKey, lpFile, flags=REG_REFRESH_HIVE):
request = BaseRegRestoreKey()
request['hKey'] = hKey
request['lpFile'] = checkNullString(lpFile)
request['Flags'] = flags
return dce.request(request)
def hBaseRegSaveKey(dce, hKey, lpFile, pSecurityAttributes=NULL):
request = BaseRegSaveKey()
request['hKey'] = hKey
request['lpFile'] = checkNullString(lpFile)
request['pSecurityAttributes'] = pSecurityAttributes
return dce.request(request)
def hBaseRegSetValue(dce, hKey, lpValueName, dwType, lpData):
request = BaseRegSetValue()
request['hKey'] = hKey
request['lpValueName'] = checkNullString(lpValueName)
request['dwType'] = dwType
request['lpData'] = lpData.encode('utf-16le')
request['cbData'] = len(request['lpData'])
return dce.request(request)
def hBaseRegGetVersion(dce, hKey):
request = BaseRegGetVersion()
request['hKey'] = hKey
return dce.request(request)
def hOpenCurrentConfig(dce, samDesired=MAXIMUM_ALLOWED):
request = OpenCurrentConfig()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hBaseRegQueryMultipleValues(dce, hKey, val_listIn):
# ToDo, check the result to see whether we need to
# have a bigger buffer for the data to receive
request = BaseRegQueryMultipleValues()
request['hKey'] = hKey
for item in val_listIn:
itemn = RVALENT()
itemn['ve_valuename'] = checkNullString(item['ValueName'])
itemn['ve_valuelen'] = len(itemn['ve_valuename'])
itemn['ve_valueptr'] = NULL
itemn['ve_type'] = item['ValueType']
request['val_listIn'].append(itemn)
request['num_vals'] = len(request['val_listIn'])
request['lpvalueBuf'] = list(' ' * 128)
request['ldwTotsize'] = 128
resp = dce.request(request)
retVal = list()
for item in resp['val_listOut']:
itemn = {}
itemn['ValueName'] = item['ve_valuename']
itemn['ValueData'] = unpackValue(item['ve_type'], resp['lpvalueBuf'][item['ve_valueptr']: item['ve_valueptr'] + item['ve_valuelen']])
retVal.append(itemn)
return retVal
def hBaseRegSaveKeyEx(dce, hKey, lpFile, pSecurityAttributes=NULL, flags=1):
request = BaseRegSaveKeyEx()
request['hKey'] = hKey
request['lpFile'] = checkNullString(lpFile)
request['pSecurityAttributes'] = pSecurityAttributes
request['Flags'] = flags
return dce.request(request)
def hOpenPerformanceText(dce, samDesired=MAXIMUM_ALLOWED):
request = OpenPerformanceText()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hOpenPerformanceNlsText(dce, samDesired=MAXIMUM_ALLOWED):
request = OpenPerformanceNlsText()
request['ServerName'] = NULL
request['samDesired'] = samDesired
return dce.request(request)
def hBaseRegDeleteValue(dce, hKey, lpValueName):
request = BaseRegDeleteValue()
request['hKey'] = hKey
request['lpValueName'] = checkNullString(lpValueName)
return dce.request(request)
|
the-stack_106_31639 | #!/usr/bin/env python3
import googlemaps
import json
#gmaps = googlemaps.Client(key="AIzaSyBm3Vv7k-8DiE_uBvptymYypVtYlGnqF8g")
f = open("hospitals_src.json", "r", encoding = "UTF-8")
hospitals = json.load(f)
f.close()
results = []
result = bytes()
size = 0
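# Accumulate Big5-encoded addresses into batches of fewer than 10000 bytes each.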
for h in hospitals:
#result = gmaps.geocode(h["機構地址"])
#print(result)
print(h["機構地址"])
s = h["機構地址"].encode("big5", errors="ignore")
if size + len(s) < 10000:
result += s
result += '\n'.encode("big5")
size += len(s) + 1
else:
        results.append(result)
        # start the next batch with the current address so it is not dropped
        result = s + '\n'.encode("big5")
        size = len(s) + 1
if result:
results.append(result)
i = 0
for result in results:
f = open("hospitals_%d.csv" % i, "wb")
f.write(result)
f.close()
i += 1
|
the-stack_106_31640 | from playwright.sync_api import sync_playwright
with sync_playwright() as p:
browser = p.firefox.launch()
page = browser.new_page()
listalinks = []
    # This range is set manually; it determines how many pages/images will be downloaded.
    # The process is a bit slow, so the downloads are done in parts.
for c in range(101, 102+1): # 1585pgs max
print(c)
try:
page.goto('https://wallpaperscraft.com/catalog/nature/1920x1080' + f'/page{c}')
mini15 = page.query_selector_all('.wallpapers__image')
for cc in mini15:
try:
with page.expect_popup() as popup_info:
cc.click(button='middle')
aba = popup_info.value
aba.click('text=Download wallpaper 1920x1080')
listalinks.append(aba.url)
aba.close()
except:
                    print('Error opening tab!')
except:
            print('Error opening page!')
print(len(listalinks))
try:
for c in listalinks:
binarios = page.goto(c).body()
nomeimage = page.url
            nomeimage = nomeimage[48:] # Each image gets a unique name, derived from its own link.
caminho = r'C:\Users\Daniel\Desktop\wallpaperscraft.com nature\\'
open(caminho + nomeimage, 'wb').write(binarios)
except:
        print('Error opening the image link or reading the binary data!')
    print('END')
browser.close()
|
the-stack_106_31642 | import requests
from PIL import Image, ImageFilter, ImageDraw, ImageFont
h = 700
w = 700
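# Look up the player's UUID via the Mojang API, download a rendered bust/head image
# from visage.surgeplay.com, recolor the black background and stamp the player's name on it.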
name = input("What Name? ")
color = input("What Color Should the background be? ")
busthead = input("Bust OR head: ")
r = requests.get(f"https://api.mojang.com/users/profiles/minecraft/{name}")
rdata = r.json()
uuid = rdata["id"]
names = rdata["name"]
req = requests.get(f"https://visage.surgeplay.com/{busthead}/512/{uuid}")
with open("face.png","wb") as f:
f.write(req.content)
img = Image.open("face.png")
res = img.convert("RGB")
pink = 255,192,203
width = res.size[0]
height = res.size[1]
for x in range(0,width):
for y in range(0,height):
data = res.getpixel((x, y))
if color == "pink":
if (data[0] == 0 and data[1] == 0 and data[2] == 0 ):
res.putpixel((x, y), (255,192,203))
elif color == "red":
if (data[0] == 0 and data[1] == 0 and data[2] == 0 ):
res.putpixel((x, y), (255,0,0))
elif color == "blue":
if (data[0] == 0 and data[1] == 0 and data[2] == 0 ):
res.putpixel((x, y), (173,216,230))
font = ImageFont.truetype("Reglisse-0WOD9.otf", size=100)
d = ImageDraw.Draw(res)
d.text((88,451),text=names,font=font)
res.save("Finish.png")
imgfil = Image.open("Finish.png")
img3 = imgfil.filter(ImageFilter.SHARPEN)
img3.save("Finish.png")
|
the-stack_106_31643 | '''RegNet in PyTorch.
Paper: "Designing Network Design Spaces".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BlockX(nn.Module):
def __init__(self, w_in, w_out, stride, bottleneck_ratio, num_groups):
super(BlockX, self).__init__()
# 1x1
w_b = int(round(w_out * bottleneck_ratio))
self.conv1 = nn.Conv2d(w_in, w_b, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(w_b)
# 3x3
groups = w_b // num_groups
self.conv2 = nn.Conv2d(w_b, w_b, kernel_size=3,
stride=stride, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(w_b)
# 1x1
self.conv3 = nn.Conv2d(w_b, w_out, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(w_out)
self.shortcut = nn.Sequential()
if stride != 1 or w_in != w_out:
self.shortcut = nn.Sequential(
nn.Conv2d(w_in, w_out,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(w_out)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class RegNet(nn.Module):
def __init__(self, cfg, num_classes=10):
super(RegNet, self).__init__()
self.cfg = cfg
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(0)
self.layer2 = self._make_layer(1)
self.layer3 = self._make_layer(2)
self.layer4 = self._make_layer(3)
self.linear = nn.Linear(self.cfg['widths'][-1], num_classes)
def _make_layer(self, idx):
depth = self.cfg['depths'][idx]
width = self.cfg['widths'][idx]
stride = self.cfg['strides'][idx]
num_groups = self.cfg['num_groups']
bottleneck_ratio = self.cfg['bottleneck_ratio']
layers = []
for i in range(depth):
s = stride if i == 0 else 1
layers.append(BlockX(self.in_planes, width,
s, bottleneck_ratio, num_groups))
self.in_planes = width
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.adaptive_avg_pool2d(out, (1, 1))
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def RegNetX_200MF():
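    # Per-stage depths, widths and strides for the RegNetX-200MF variant.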
cfg = {
'depths': [1, 1, 4, 7],
'widths': [24, 56, 152, 368],
'strides': [1, 1, 2, 2],
'num_groups': 8,
'bottleneck_ratio': 1,
}
return RegNet(cfg)
def test():
net = RegNetX_200MF()
print(net)
x = torch.randn(2, 3, 32, 32)
y = net(x)
print(y.shape)
if __name__ == '__main__':
test()
|
the-stack_106_31644 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listtransactions API."""
from decimal import Decimal
from io import BytesIO
from test_framework.mininode import CTransaction, COIN
from test_framework.test_framework import RapidsTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
bytes_to_hex_str,
hex_str_to_bytes,
sync_mempools,
)
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(RapidsTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.enable_mocktime()
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "amount": Decimal("-0.1"), "confirmations": 0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "amount": Decimal("-0.1"), "confirmations": 1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "send"},
{"amount": Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "receive"},
{"amount": Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = {self.nodes[0].getnewaddress(): 0.11,
self.nodes[1].getnewaddress(): 0.22,
self.nodes[0].getnewaddress(): 0.33,
self.nodes[1].getnewaddress(): 0.44}
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send", "amount": Decimal("-0.11")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.11")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.33")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.33")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.44")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.44")},
{"txid": txid})
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert not [tx for tx in self.nodes[0].listtransactions("*", 100, 0, False) if "label" in tx and tx["label"] == "watchonly"]
txs = [tx for tx in self.nodes[0].listtransactions("*", 100, 0, True) if "label" in tx and tx['label'] == 'watchonly']
assert_array_result(txs, {"category": "receive", "amount": Decimal("0.1")}, {"txid": txid})
if __name__ == '__main__':
ListTransactionsTest().main()
|
the-stack_106_31647 | # Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
import contextlib
from xml.dom import minidom
from oslo_serialization import jsonutils
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova.network.security_group import openstack_driver
from nova.openstack.common import log as logging
from nova.virt import netutils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
def _authorize_context(req):
context = req.environ['nova.context']
authorize(context)
return context
@contextlib.contextmanager
def translate_exceptions():
"""Translate nova exceptions to http exceptions."""
try:
yield
except exception.Invalid as exp:
msg = exp.format_message()
raise exc.HTTPBadRequest(explanation=msg)
except exception.SecurityGroupNotFound as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceNotFound as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
except exception.SecurityGroupLimitExceeded as exp:
msg = exp.format_message()
raise exc.HTTPForbidden(explanation=msg)
except exception.NoUniqueMatch as exp:
msg = exp.format_message()
raise exc.HTTPConflict(explanation=msg)
class SecurityGroupControllerBase(object):
"""Base class for Security Group controllers."""
def __init__(self):
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _format_security_group_rule(self, context, rule, group_rule_data=None):
"""Return a secuity group rule in desired API response format.
If group_rule_data is passed in that is used rather than querying
for it.
"""
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['parent_group_id'] = rule['parent_group_id']
sg_rule['ip_protocol'] = rule['protocol']
sg_rule['from_port'] = rule['from_port']
sg_rule['to_port'] = rule['to_port']
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if rule['group_id']:
with translate_exceptions():
try:
source_group = self.security_group_api.get(
context, id=rule['group_id'])
except exception.SecurityGroupNotFound:
# NOTE(arosen): There is a possible race condition that can
# occur here if two api calls occur concurrently: one that
# lists the security groups and another one that deletes a
# security group rule that has a group_id before the
# group_id is fetched. To handle this if
# SecurityGroupNotFound is raised we return None instead
# of the rule and the caller should ignore the rule.
LOG.debug("Security Group ID %s does not exist",
rule['group_id'])
return
sg_rule['group'] = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
elif group_rule_data:
sg_rule['group'] = group_rule_data
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
def _format_security_group(self, context, group):
security_group = {}
security_group['id'] = group['id']
security_group['description'] = group['description']
security_group['name'] = group['name']
security_group['tenant_id'] = group['project_id']
security_group['rules'] = []
for rule in group['rules']:
formatted_rule = self._format_security_group_rule(context, rule)
if formatted_rule:
security_group['rules'] += [formatted_rule]
return security_group
def _from_body(self, body, key):
if not body:
raise exc.HTTPBadRequest(
explanation=_("The request body can't be empty"))
value = body.get(key, None)
if value is None:
raise exc.HTTPBadRequest(
explanation=_("Missing parameter %s") % key)
return value
class SecurityGroupController(SecurityGroupControllerBase):
"""The Security group API controller for the OpenStack API."""
def show(self, req, id):
"""Return data about the given security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
return {'security_group': self._format_security_group(context,
security_group)}
def delete(self, req, id):
"""Delete a security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
self.security_group_api.destroy(context, security_group)
return webob.Response(status_int=202)
def index(self, req):
"""Returns a list of security groups."""
context = _authorize_context(req)
search_opts = {}
search_opts.update(req.GET)
with translate_exceptions():
project_id = context.project_id
raw_groups = self.security_group_api.list(context,
project=project_id,
search_opts=search_opts)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
for group in limited_list]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
def create(self, req, body):
"""Creates a new security group."""
context = _authorize_context(req)
security_group = self._from_body(body, 'security_group')
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
with translate_exceptions():
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
def update(self, req, id, body):
"""Update a security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
security_group_data = self._from_body(body, 'security_group')
group_name = security_group_data.get('name', None)
group_description = security_group_data.get('description', None)
with translate_exceptions():
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.update_security_group(
context, security_group, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
def create(self, req, body):
context = _authorize_context(req)
sg_rule = self._from_body(body, 'security_group_rule')
with translate_exceptions():
parent_group_id = self.security_group_api.validate_id(
sg_rule.get('parent_group_id', None))
security_group = self.security_group_api.get(context, None,
parent_group_id,
map_exception=True)
try:
new_rule = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'),
group_id=sg_rule.get('group_id'))
except exception.SecurityGroupNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except Exception as exp:
raise exc.HTTPBadRequest(explanation=six.text_type(exp))
if new_rule is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
new_rule['parent_group_id'] = security_group['id']
if 'cidr' in new_rule:
net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
if net not in ('0.0.0.0', '::') and prefixlen == '0':
msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
group_rule_data = None
with translate_exceptions():
if sg_rule.get('group_id'):
source_group = self.security_group_api.get(
context, id=sg_rule['group_id'])
group_rule_data = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
security_group_rule = (
self.security_group_api.create_security_group_rule(
context, security_group, new_rule))
formatted_rule = self._format_security_group_rule(context,
security_group_rule,
group_rule_data)
return {"security_group_rule": formatted_rule}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
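        # A rule either references a source security group (group_id) or an IP
        # range (cidr); build the corresponding ingress rule dict.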
if group_id is not None:
group_id = self.security_group_api.validate_id(group_id)
# check if groupId exists
self.security_group_api.get(context, id=group_id)
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def delete(self, req, id):
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
rule = self.security_group_api.get_rule(context, id)
group_id = rule['parent_group_id']
security_group = self.security_group_api.get(context, None,
group_id,
map_exception=True)
self.security_group_api.remove_rules(context, security_group,
[rule['id']])
return webob.Response(status_int=202)
class ServerSecurityGroupController(SecurityGroupControllerBase):
def index(self, req, server_id):
"""Returns a list of security groups for the given instance."""
context = _authorize_context(req)
self.security_group_api.ensure_default(context)
with translate_exceptions():
instance = self.compute_api.get(context, server_id,
want_objects=True)
groups = self.security_group_api.get_instance_security_groups(
context, instance.uuid, True)
result = [self._format_security_group(context, group)
for group in groups]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _parse(self, body, action):
try:
body = body[action]
group_name = body['name']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Security group not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
if not group_name or group_name.strip() == '':
msg = _("Security group name cannot be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return group_name
def _invoke(self, method, context, id, group_name):
with translate_exceptions():
instance = self.compute_api.get(context, id)
method(context, instance, group_name)
return webob.Response(status_int=202)
@wsgi.action('addSecurityGroup')
def _addSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'addSecurityGroup')
return self._invoke(self.security_group_api.add_to_instance,
context, id, group_name)
@wsgi.action('removeSecurityGroup')
def _removeSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'removeSecurityGroup')
return self._invoke(self.security_group_api.remove_from_instance,
context, id, group_name)
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
def _extend_servers(self, req, servers):
# TODO(arosen) this function should be refactored to reduce duplicate
# code and use get_instance_security_groups instead of get_db_instance.
if not len(servers):
return
key = "security_groups"
context = _authorize_context(req)
if not openstack_driver.is_neutron_security_groups():
for server in servers:
instance = req.get_db_instance(server['id'])
groups = instance.get(key)
if groups:
server[key] = [{"name": group["name"]} for group in groups]
else:
# If method is a POST we get the security groups intended for an
# instance from the request. The reason for this is if using
# neutron security groups the requested security groups for the
# instance are not in the db and have not been sent to neutron yet.
if req.method != 'POST':
sg_instance_bindings = (
self.security_group_api
.get_instances_security_groups_bindings(context,
servers))
for server in servers:
groups = sg_instance_bindings.get(server['id'])
if groups:
server[key] = groups
# In this section of code len(servers) == 1 as you can only POST
# one server in an API request.
else:
try:
# try converting to json
req_obj = jsonutils.loads(req.body)
# Add security group to server, if no security group was in
# request add default since that is the group it is part of
servers[0][key] = req_obj['server'].get(
key, [{'name': 'default'}])
except ValueError:
root = minidom.parseString(req.body)
sg_root = root.getElementsByTagName(key)
groups = []
if sg_root:
security_groups = sg_root[0].getElementsByTagName(
'security_group')
for security_group in security_groups:
groups.append(
{'name': security_group.getAttribute('name')})
if not groups:
groups = [{'name': 'default'}]
servers[0][key] = groups
def _show(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
if 'server' in resp_obj.obj:
self._extend_servers(req, [resp_obj.obj['server']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
self._extend_servers(req, list(resp_obj.obj['servers']))
class Security_groups(extensions.ExtensionDescriptor):
"""Security group support."""
name = "SecurityGroups"
alias = "os-security-groups"
namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
updated = "2013-05-28T00:00:00Z"
def get_controller_extensions(self):
controller = SecurityGroupActionController()
actions = extensions.ControllerExtension(self, 'servers', controller)
controller = SecurityGroupsOutputController()
output = extensions.ControllerExtension(self, 'servers', controller)
return [actions, output]
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-security-groups',
controller=SecurityGroupController())
resources.append(res)
res = extensions.ResourceExtension('os-security-group-rules',
controller=SecurityGroupRulesController())
resources.append(res)
res = extensions.ResourceExtension(
'os-security-groups',
controller=ServerSecurityGroupController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
|
the-stack_106_31648 | # -*- coding: utf-8 -*-
#
# Configuration file for Sphinx builds for the zhmcclient project.
#
# Originally created by sphinx-quickstart, but then manually maintained.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# pylint: disable=invalid-name
"""
Config file for Sphinx.
"""
import sys
import os
import inspect
# Imports used by code for autoautosummary
from docutils.parsers.rst import directives
from sphinx.ext.autosummary import Autosummary
from sphinx.ext.autosummary import get_documenter
from sphinx.util.inspect import safe_getattr
from sphinx.util import logging
def get_version(version_file):
"""
Execute the specified version file and return the value of the __version__
global variable that is set in the version file.
Note: Make sure the version file does not depend on any packages in the
requirements list of this package (otherwise it cannot be executed in
a fresh Python environment).
"""
with open(version_file, 'r') as fp:
version_source = fp.read()
_globals = {}
exec(version_source, _globals) # pylint: disable=exec-used
return _globals['__version__']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'sphinx.ext.todo',
'sphinx.ext.coverage',
    'sphinx.ext.viewcode', # disabled, raises an exception
'sphinx.ext.ifconfig',
'sphinx_git', # requires 'sphinx-git' Python package
# Note: sphinx_rtd_theme is not compatible with sphinxcontrib.fulltoc,
# but since it already provides a full TOC in the navigation pane, the
# sphinxcontrib.fulltoc extension is not needed.
'sphinx_rtd_theme',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
master_doc = 'index'
else:
master_doc = 'docs/index'
# General information about the project.
project = u'zhmcclient'
copyright = u'IBM' # pylint: disable=redefined-builtin
author = u'zhmcclient team'
# The short description of the package.
_short_description = u'Client library for IBM Z Hardware Management Console ' \
u'Web Services API'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# RTD builds its versions as follows:
# - latest: Is triggered by merges to the master branch that do not have a tag.
# - stable: Is triggered by merges to any branch that do have a tag.
# As a result of this trigger logic, the master branch does not build when
# a version of the package is released, but only builds upon the next merge
# afterwards. The latest version can be triggered manually if that is an issue.
# The short X.Y version.
# Note: We use the full version in both cases (e.g. 'M.N.U' or 'M.N.U.dev0').
version = get_version(os.path.join('..', 'zhmcclient', '_version.py'))
# The full version, including alpha/beta/rc tags.
release = version
# Some prints, for extra information
print("conf.py: pwd: %s" % os.getcwd())
print("conf.py: zhmcclient version: %s" % version)
print("conf.py: Last 5 commits:")
sys.stdout.flush()
os.system('git log --decorate --oneline |head -5')
print("conf.py: End of commits")
sys.stdout.flush()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["README.rst", "try", "design",
"tests", ".tox", ".git", "attic", "dist",
"build_doc", "zhmcclient.egg-info", ".eggs"]
# The reST default role (used for this markup: `text`) to use for all
# documents. None means it is rendered in italic, without a link.
default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for Napoleon extension ---------------------------------------
# Enable support for Google style docstrings. Defaults to True.
napoleon_google_docstring = True
# Enable support for NumPy style docstrings. Defaults to True.
napoleon_numpy_docstring = False
# Include private members (like _membername). False to fall back to Sphinx’s
# default behavior. Defaults to False.
napoleon_include_private_with_doc = False
# Include special members (like __membername__). False to fall back to Sphinx’s
# default behavior. Defaults to True.
napoleon_include_special_with_doc = True
# Use the .. admonition:: directive for the Example and Examples sections,
# instead of the .. rubric:: directive. Defaults to False.
napoleon_use_admonition_for_examples = False
# Use the .. admonition:: directive for Notes sections, instead of the
# .. rubric:: directive. Defaults to False.
napoleon_use_admonition_for_notes = False
# Use the .. admonition:: directive for References sections, instead of the
# .. rubric:: directive. Defaults to False.
napoleon_use_admonition_for_references = False
# Use the :ivar: role for instance variables, instead of the .. attribute::
# directive. Defaults to False.
napoleon_use_ivar = True
# Use a :param: role for each function parameter, instead of a single
# :parameters: role for all the parameters. Defaults to True.
napoleon_use_param = True
# Use the :rtype: role for the return type, instead of inlining it with the
# description. Defaults to True.
napoleon_use_rtype = True
# -- Options for viewcode extension ---------------------------------------
# Follow alias objects that are imported from another module such as functions,
# classes and attributes. As side effects, this option ... ???
# If false, ... ???.
# The default is True.
viewcode_import = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.
# See http://www.sphinx-doc.org/en/stable/theming.html for built-in themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further.
# See http://www.sphinx-doc.org/en/stable/theming.html for the options
# available for built-in themes.
# For options of the 'sphinx_rtd_theme', see
# https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html
html_theme_options = {
'style_external_links': False,
'collapse_navigation': False,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If not defined, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = 'ld'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['_extra']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = project+'_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, project+'.tex', _short_description, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project, _short_description, [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, _short_description,
author, project, _short_description,
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for autodoc extension ----------------------------------------
# For documentation, see
# http://www.sphinx-doc.org/en/stable/ext/autodoc.html
# Selects what content will be inserted into a class description.
# The possible values are:
# "class" - Only the class’ docstring is inserted. This is the default.
# "both" - Both the class’ and the __init__ method’s docstring are
# concatenated and inserted.
# "init" - Only the __init__ method’s docstring is inserted.
# In all cases, the __init__ method is still independently rendered as a
# special method, e.g. when the :special-members: option is set.
autoclass_content = "both"
# Selects if automatically documented members are sorted alphabetically
# (value 'alphabetical'), by member type (value 'groupwise') or by source
# order (value 'bysource'). The default is alphabetical.
autodoc_member_order = "bysource"
# This value is a list of autodoc directive flags that should be automatically
# applied to all autodoc directives. The supported flags are:
# 'members', 'undoc-members', 'private-members', 'special-members',
# 'inherited-members' and 'show-inheritance'.
# If you set one of these flags in this config value, you can use a negated
# form, 'no-flag', in an autodoc directive, to disable it once.
autodoc_default_flags = []
# Functions imported from C modules cannot be introspected, and therefore the
# signature for such functions cannot be automatically determined. However, it
# is an often-used convention to put the signature into the first line of the
# function’s docstring.
# If this boolean value is set to True (which is the default), autodoc will
# look at the first line of the docstring for functions and methods, and if it
# looks like a signature, use the line as the signature and remove it from the
# docstring content.
autodoc_docstring_signature = True
# This value contains a list of modules to be mocked up. This is useful when
# some external dependencies are not met at build time and break the building
# process.
autodoc_mock_imports = []
# -- Options for intersphinx extension ------------------------------------
# For documentation, see
# http://www.sphinx-doc.org/en/stable/ext/intersphinx.html
# Defines the prefixes for intersphinx links, and the targets they resolve to.
# Example RST source for 'py2' prefix:
# :func:`py2:platform.dist`
#
# Note: The URLs apparently cannot be the same for two different IDs; otherwise
# the links for one of them are not being created. A small difference
# such as adding a trailing backslash is already sufficient to work
# around the problem.
#
# Note: This mapping does not control how links to datatypes of function
# parameters are generated.
# TODO: Find out how the targeted Python version for auto-generated links
# to datatypes of function parameters can be controlled.
#
intersphinx_mapping = {
'py': ('https://docs.python.org/3/', None), # agnostic to Python version
'py2': ('https://docs.python.org/2', None), # specific to Python 2
'py3': ('https://docs.python.org/3', None), # specific to Python 3
}
intersphinx_cache_limit = 5
# -- Options for extlinks extension ---------------------------------------
# For documentation, see
# http://www.sphinx-doc.org/en/stable/ext/extlinks.html
#
# Defines aliases for external links that can be used as role names.
#
# This config value must be a dictionary of external sites, mapping unique
# short alias names to a base URL and a prefix:
# * key: alias-name
# * value: tuple of (base-url, prefix)
#
# Example for the config value:
#
# extlinks = {
# 'issue': ('https://github.com/sphinx-doc/sphinx/issues/%s', 'Issue ')
# }
#
# The alias-name can be used as a role in links. In the example, alias name
# 'issue' is used in RST as follows:
# :issue:`123`.
# This then translates into a link:
# https://github.com/sphinx-doc/sphinx/issues/123
# where the %s in the base-url was replaced with the value between back quotes.
#
# The prefix plays a role only for the link caption:
# * If the prefix is None, the link caption is the full URL.
# * If the prefix is the empty string, the link caption is the partial URL
# given in the role content ("123" in this case.)
# * If the prefix is a non-empty string, the link caption is the partial URL,
# prepended by the prefix. In the above example, the link caption would be
# "Issue 123".
#
# You can also use the usual "explicit title" syntax supported by other roles
# that generate links to set the caption. In this case, the prefix is not
# relevant.
# For example, this RST:
# :issue:`this issue <123>`
# results in the link caption "this issue".
extlinks = {
'nbview': ('http://nbviewer.jupyter.org/github/zhmcclient/'
'python-zhmcclient/blob/master/docs/notebooks/%s', ''),
'nbdown': ('https://github.com/zhmcclient/python-zhmcclient/'
'raw/master/docs/notebooks/%s', '')
}
# -- Support for autoautosummary ----------------------------------------------
#
# Idea taken from https://stackoverflow.com/a/30783465/1424462
#
class AutoAutoSummary(Autosummary):
"""
Sphinx extension that automatically generates a table of public methods or
attributes of a class, using the AutoSummary extension.
(i.e. each row in the table shows the method or attribute name with a
link to the full description, and a one-line brief description).
Usage in RST source::
.. autoclass:: path.to.class
:<autoclass-options>:
.. rubric:: Methods
.. autoautosummary:: path.to.class
:methods:
.. rubric:: Attributes
.. autoautosummary:: path.to.class
:attributes:
.. rubric:: Details
"""
option_spec = {
'methods': directives.unchanged,
'attributes': directives.unchanged
}
option_spec.update(Autosummary.option_spec)
required_arguments = 1 # Fully qualified class name
def __init__(self, *args, **kwargs):
self._logger = logging.getLogger(__name__) # requires Sphinx 1.6.1
self._log_prefix = "conf.py/AutoAutoSummary"
self._excluded_classes = ['BaseException']
super(AutoAutoSummary, self).__init__(*args, **kwargs)
def _get_members(self, class_obj, member_type, include_in_public=None):
"""
Return class members of the specified type.
class_obj: Class object.
member_type: Member type ('method' or 'attribute').
include_in_public: set/list/tuple with member names that should be
included in public members in addition to the public names (those
starting without underscore).
Returns:
tuple(public_members, all_members): Names of the class members of
the specified member type (public / all).
"""
try:
app = self.state.document.settings.env.app
except AttributeError:
app = None
if not include_in_public:
include_in_public = []
all_members = []
for member_name in dir(class_obj):
try:
documenter = get_documenter(
app,
safe_getattr(class_obj, member_name),
class_obj)
except AttributeError:
continue
if documenter.objtype == member_type:
all_members.append(member_name)
public_members = [x for x in all_members
if x in include_in_public or not x.startswith('_')]
return public_members, all_members
def _get_def_class(self, class_obj, member_name):
"""
Return the class object in MRO order that defines a member.
class_obj: Class object that exposes (but not necessarily defines) the
member. I.e. starting point of the search.
member_name: Name of the member (method or attribute).
Returns:
Class object that defines the member.
"""
for def_class_obj in inspect.getmro(class_obj):
if member_name in def_class_obj.__dict__:
if def_class_obj.__name__ in self._excluded_classes:
return class_obj # Fall back to input class
return def_class_obj
self._logger.warning(
"%s: Definition class not found for member %s.%s, "
"defaulting to class %s",
self._log_prefix, class_obj.__name__, member_name,
class_obj.__name__)
return class_obj # Input class is better than nothing
def run(self):
"""
Run the extension.
"""
try:
full_class_name = str(self.arguments[0])
module_name, class_name = full_class_name.rsplit('.', 1)
module_obj = __import__(module_name, globals(), locals(),
[class_name])
class_obj = getattr(module_obj, class_name)
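            # Build autosummary entries of the form "~DefiningClass.member" so each
            # public member links to the class (in MRO order) that actually defines it.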
if 'methods' in self.options:
_, methods = self._get_members(
class_obj, 'method', ['__init__'])
self.content = [
"~%s.%s" % (
self._get_def_class(class_obj, method).__name__,
method)
for method in methods if not method.startswith('_')
]
elif 'attributes' in self.options:
_, attributes = self._get_members(class_obj, 'attribute')
self.content = [
"~%s.%s" % (
self._get_def_class(class_obj, attrib).__name__,
attrib)
for attrib in attributes if not attrib.startswith('_')
]
except Exception as exc: # pylint: disable=broad-except
self._logger.error(
"%s: Internal error: %s: %s",
self._log_prefix, exc.__class__.__name__, exc)
finally:
return super(AutoAutoSummary, self).run()
def setup(app):
"""
Called by Sphinx. Registers the AutoAutoSummary extension.
"""
app.add_directive('autoautosummary', AutoAutoSummary)
|
the-stack_106_31653 | import simplejson as json
from prettyparse import create_parser
from komprenu.model import Model
usage = '''
Train a new grammar model on the input json data
:data_json str
Data file to load from
:model_json str
Model file to write to
:--latent-len -l int 100
Number of latent bits in model
:--vocab-size -v int 100
Number of words in limited grammar dictionary
:--lines -li int 10
lines to train on
:--iterations -i int 1000
Number of iterations to train the model
'''
def main():
args = create_parser(usage).parse_args()
print('Loading json data...')
with open(args.data_json) as f:
conversations = json.load(f)
print('Creating empty model...')
model = Model(args.vocab_size, args.latent_len)
try:
model.train((j for i in conversations for j in i), args.vocab_size, args.iterations,
args.lines)
finally:
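        # Save the model even if training is interrupted partway through.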
model.save(args.model_json)
for i in range(10):
print(' '.join(model.walk(10)))
if __name__ == '__main__':
main()
|
the-stack_106_31654 | """A Link represents the predicate-object portion of a triple."""
import abc
import uuid
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union
from pydantic import validator
from rdflib import BNode, Graph, Literal, Namespace, RDF, URIRef, XSD
from altimeter.core.base_model import BaseImmutableModel
from altimeter.core.graph import SCALAR_TYPES
from altimeter.core.graph.node_cache import NodeCache
class BaseLink(BaseImmutableModel, abc.ABC):
"""A link represents the predicate-object portion of a triple.
BaseLink is an abstract base class for Link subclasses.
"""
def to_rdf(
self, subj: BNode, namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this link on a BNode in a Graph using a given Namespace to create the full
predicate.
Args:
subj: subject portion of triple - graph this link's pred, obj against it.
namespace: RDF namespace to use for this triple's predicate
graph: RDF graph
node_cache: NodeCache to use to find cached nodes.
"""
def to_lpg(self, parent: Dict, vertices: List[Dict], edges: List[Dict], prefix: str) -> None:
"""Graph this link on a BNode in a Graph using a given Namespace to create the full
predicate.
Args:
            parent: a dictionary of the parent
vertices: a list of dictionaries of the vertices for a labeled property graph
edges: a list of dictionaries of the edges for a labeled property graph
prefix: a prefix to add to the attribute name
"""
class SimpleLink(BaseLink):
"""A SimpleLink represents a scalar value. In RDF terms a SimpleLink creates a Literal
in the graph."""
pred: str
obj: Any
# pylint: disable=no-self-argument,no-self-use
@validator("obj")
def obj_is_scalar(cls, val: Any) -> Any:
if not isinstance(val, SCALAR_TYPES):
raise ValueError(
(f"Expected data to be one of {SCALAR_TYPES}, is " f"{type(val)}: {val}")
)
return val
def to_rdf(
self, subj: BNode, namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this link on a BNode in a Graph using a given Namespace to create the full
predicate.
Args:
subj: subject portion of triple - graph this link's pred, obj against it.
namespace: RDF namespace to use for this triple's predicate
graph: RDF graph
node_cache: NodeCache to use to find cached nodes.
"""
datatype = None
if isinstance(self.obj, int):
if self.obj > 2147483647:
datatype = XSD.nonNegativeInteger
literal = Literal(self.obj, datatype=datatype)
graph.add((subj, getattr(namespace, self.pred), literal))
def to_lpg(
self, parent: Dict, vertices: List[Dict], edges: List[Dict], prefix: str = ""
) -> None:
"""Convert this link to the appropriate vertices, edges, and properties
Args:
:parent: the parent dictionary vertex
:param vertices: the list of all vertex dictionaries
:param edges: the list of all edge dictionaries
:param prefix: the prefix assigned to the key
:type parent: Dict
"""
obj = self.obj
if isinstance(obj, int):
# Need to handle numbers that are bigger than a Long in Java, for now we stringify it
if obj > 9223372036854775807 or obj < -9223372036854775807:
obj = str(obj)
elif isinstance(obj, SimpleLink):
print("ERROR ERROR")
parent[prefix + self.pred] = obj
class MultiLink(BaseLink):
"""Represents a named set of sublinks. For example an 'EBSVolumeAttachemnt'
MultiLink could exist which specifies sublinks Volume, AttachTime"""
pred: str
obj: "LinkCollection"
def to_rdf(
self, subj: BNode, namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this link on a BNode in a Graph using a given Namespace to create the full
predicate.
Args:
subj: subject portion of triple - graph this link's pred, obj against it.
namespace: RDF namespace to use for this triple's predicate
graph: RDF graph
node_cache: NodeCache to use to find cached nodes.
"""
map_node = BNode()
graph.add((map_node, RDF.type, getattr(namespace, f"{self.pred}")))
self.obj.to_rdf(map_node, namespace, graph, node_cache)
graph.add((subj, getattr(namespace, self.pred), map_node))
def to_lpg(
self, parent: Dict, vertices: List[Dict], edges: List[Dict], prefix: str = ""
) -> None:
"""Convert this link to the appropriate vertices, edges, and properties
Args:
:parent: the parent dictionary vertex
vertices: the list of all vertex dictionaries
edges: the list of all edge dictionaries
prefix: A string to prefix the property name with
"""
vertex_id = uuid.uuid1()
v = {
"~id": vertex_id,
"~label": self.pred,
}
edge_label = prefix if prefix != "" else self.pred
edge = {
"~id": uuid.uuid1(),
"~label": edge_label,
"~from": parent["~id"],
"~to": vertex_id,
}
edges.append(edge)
vertices.append(v)
        self.obj.to_lpg(v, vertices, edges)
class ResourceLink(BaseLink):
"""Represents a link to another resource which must exist in the graph."""
pred: str
obj: str
def to_rdf(
self, subj: BNode, namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this link on a BNode in a Graph using a given Namespace to create the full
predicate.
Args:
subj: subject portion of triple - graph this link's pred, obj against it.
namespace: RDF namespace to use for this triple's predicate
graph: RDF graph
node_cache: NodeCache to use to find cached nodes.
"""
link_node = node_cache.setdefault(self.obj, URIRef(self.obj))
graph.add((subj, getattr(namespace, self.pred), link_node))
def to_lpg(
self, parent: Dict, vertices: List[Dict], edges: List[Dict], prefix: str = ""
) -> None:
"""Convert this link to the appropriate vertices, edges, and properties
Args:
:parent: the parent dictionary vertex
vertices: the list of all vertex dictionaries
edges: the list of all edge dictionaries
prefix: string to prefix the property name with
"""
edge = {
"~id": uuid.uuid1(),
"~label": "resource_link",
"~from": parent["~id"],
"~to": self.obj,
}
edges.append(edge)
class TransientResourceLink(BaseLink):
"""Represents a link to another resource which may or may not exist in the graph."""
pred: str
obj: str
def to_rdf(
self, subj: BNode, namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this link on a BNode in a Graph using a given Namespace to create the full
predicate.
Args:
subj: subject portion of triple - graph this link's pred, obj against it.
namespace: RDF namespace to use for this triple's predicate
graph: RDF graph
node_cache: NodeCache to use to find cached nodes.
"""
link_node = node_cache.setdefault(self.obj, URIRef(self.obj))
graph.add((subj, getattr(namespace, self.pred), link_node))
def to_lpg(
self, parent: Dict, vertices: List[Dict], edges: List[Dict], prefix: str = ""
) -> None:
"""Convert this link to the appropriate vertices, edges, and properties
Args:
:parent: the parent dictionary vertex
vertices: the list of all vertex dictionaries
edges: the list of all edge dictionaries
prefix: string to prefix the property name with
"""
edge = {
"~id": uuid.uuid1(),
"~label": "transient_resource_link",
"~from": parent["~id"],
"~to": self.obj,
}
edges.append(edge)
class TagLink(BaseLink):
"""Represents a AWS-style Tag attached to a node."""
pred: str
obj: str
def to_rdf(
self, subj: BNode, namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this link on a BNode in a Graph using a given Namespace to create the full
predicate.
Args:
subj: subject portion of triple - graph this link's pred, obj against it.
namespace: RDF namespace to use for this triple's predicate
graph: RDF graph
node_cache: NodeCache to use to find cached nodes.
"""
tag_id = f"{self.pred}:{self.obj}"
tag_node = node_cache.get(tag_id)
if tag_node is None:
tag_node = BNode()
graph.add((tag_node, namespace.key, Literal(self.pred)))
graph.add((tag_node, namespace.value, Literal(self.obj)))
graph.add((tag_node, RDF.type, getattr(namespace, "tag")))
node_cache[tag_id] = tag_node
graph.add((subj, getattr(namespace, "tag"), tag_node))
def to_lpg(
self, parent: Dict, vertices: List[Dict], edges: List[Dict], prefix: str = ""
) -> None:
"""Convert this link to the appropriate vertices, edges, and properties
Args:
            :parent: the parent dictionary vertex
vertices: the list of all vertex dictionaries
edges: the list of all edge dictionaries
prefix: string to prefix the property name with
"""
if not any(x["~id"] == f"{self.pred}:{self.obj}" for x in vertices):
vertex = {}
vertex["~id"] = f"{self.pred}:{self.obj}"
vertex["~label"] = "tag"
vertex[self.pred] = self.obj
vertices.append(vertex)
edge = {
"~id": uuid.uuid1(),
"~label": "tagged",
"~from": parent["~id"],
"~to": f"{self.pred}:{self.obj}",
}
edges.append(edge)
Link = Union[SimpleLink, MultiLink, TagLink, ResourceLink, TransientResourceLink]
class LinkCollection(BaseImmutableModel):
simple_links: Optional[Tuple[SimpleLink, ...]] = None
multi_links: Optional[Tuple[MultiLink, ...]] = None
tag_links: Optional[Tuple[TagLink, ...]] = None
resource_links: Optional[Tuple[ResourceLink, ...]] = None
transient_resource_links: Optional[Tuple[TransientResourceLink, ...]] = None
def to_rdf(
self, subj: BNode, namespace: Namespace, graph: Graph, node_cache: NodeCache
) -> None:
"""Graph this LinkCollection on an RDF graph"""
if self.simple_links:
for simple_link in self.simple_links:
simple_link.to_rdf(
subj=subj, namespace=namespace, graph=graph, node_cache=node_cache
)
if self.multi_links:
for multi_link in self.multi_links:
multi_link.to_rdf(
subj=subj, namespace=namespace, graph=graph, node_cache=node_cache
)
if self.tag_links:
for tag_link in self.tag_links:
tag_link.to_rdf(subj=subj, namespace=namespace, graph=graph, node_cache=node_cache)
if self.resource_links:
for resource_link in self.resource_links:
resource_link.to_rdf(
subj=subj, namespace=namespace, graph=graph, node_cache=node_cache
)
if self.transient_resource_links:
for transient_resource_link in self.transient_resource_links:
transient_resource_link.to_rdf(
subj=subj, namespace=namespace, graph=graph, node_cache=node_cache
)
def to_lpg(
self, vertex: Dict[str, Any], vertices: List[Dict], edges: List[Dict], prefix: str = ""
) -> None:
"""Graph this LinkCollection as a labelled property graph"""
if self.simple_links:
for simple_link in self.simple_links:
simple_link.to_lpg(vertex, vertices, edges, prefix)
if self.multi_links:
for multi_link in self.multi_links:
multi_link.to_lpg(vertex, vertices, edges, prefix)
if self.tag_links:
for tag_link in self.tag_links:
tag_link.to_lpg(vertex, vertices, edges, prefix)
if self.resource_links:
for resource_link in self.resource_links:
resource_link.to_lpg(vertex, vertices, edges, prefix)
if self.transient_resource_links:
for transient_resource_link in self.transient_resource_links:
transient_resource_link.to_lpg(vertex, vertices, edges, prefix)
def get_links(self) -> Tuple[Link, ...]:
return (
(self.simple_links if self.simple_links else ())
+ (self.multi_links if self.multi_links else ())
+ (self.tag_links if self.tag_links else ())
+ (self.resource_links if self.resource_links else ())
+ (self.transient_resource_links if self.transient_resource_links else ())
)
@classmethod
def from_links(cls: Type["LinkCollection"], links: Iterable[Link]) -> "LinkCollection":
simple_links: List[SimpleLink] = []
multi_links: List[MultiLink] = []
tag_links: List[TagLink] = []
resource_links: List[ResourceLink] = []
transient_resource_links: List[TransientResourceLink] = []
for link in links:
if isinstance(link, SimpleLink):
simple_links.append(link)
elif isinstance(link, MultiLink):
multi_links.append(link)
elif isinstance(link, TagLink):
tag_links.append(link)
elif isinstance(link, ResourceLink):
resource_links.append(link)
elif isinstance(link, TransientResourceLink):
transient_resource_links.append(link)
args = {
"simple_links": simple_links,
"multi_links": multi_links,
"tag_links": tag_links,
"resource_links": resource_links,
"transient_resource_links": transient_resource_links,
}
args_without_nulls = {key: val for key, val in args.items() if val}
return cls(**args_without_nulls)
def __add__(self, other: "LinkCollection") -> "LinkCollection":
simple_links = (self.simple_links if self.simple_links else ()) + (
other.simple_links if other.simple_links else ()
)
multi_links = (self.multi_links if self.multi_links else ()) + (
other.multi_links if other.multi_links else ()
)
tag_links = (self.tag_links if self.tag_links else ()) + (
other.tag_links if other.tag_links else ()
)
resource_links = (self.resource_links if self.resource_links else ()) + (
other.resource_links if other.resource_links else ()
)
transient_resource_links = (
self.transient_resource_links if self.transient_resource_links else ()
) + (other.transient_resource_links if other.transient_resource_links else ())
args = {
"simple_links": simple_links,
"multi_links": multi_links,
"tag_links": tag_links,
"resource_links": resource_links,
"transient_resource_links": transient_resource_links,
}
args_without_nulls = {key: val for key, val in args.items() if val}
return LinkCollection(**args_without_nulls)
MultiLink.update_forward_refs()
|
the-stack_106_31655 | #!/usr/bin/env python3.6
from os import path
from sklearn import preprocessing
import pandas as pd
import numpy as np
import json
import argparse
try:
import _pickle as pickle
except:
import pickle
import os
import librosa
import collections
from bbn_primitives.time_series import *
from d3m_metadata.container import ndarray
from d3m_metadata.container import List
from d3m_metadata import hyperparams, metadata as metadata_module, params, container, utils
from primitive_interfaces.transformer import TransformerPrimitiveBase
from primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from d3m.primitives.sklearn_wrap import *
import sklearn.metrics
# Example for the documentation of the TA1 pipeline submission process
#
# It executes a TA1 pipeline using a ta1-pipeline-config.json file that follows this structure:
# {
# "train_data":"path/to/train/data/folder/",
# "test_data":"path/to/test/data/folder/",
# "output_folder":"path/to/output/folder/"
# }
supportedResType = 'timeseries'
supportedTaskType = 'classification'
supportedTaskSubType = 'multiClass'
#def parse_dataset(datasetSchema):
# filename, start, end = None, None, None
#
# num_attribute = 0
# for colDesc in datasetSchema['dataResources'][1]['columns']:
# if 'attribute' in colDesc['role']:
# filename = colDesc['colName']
# num_attribute += 1
# if 'boundaryIndicator' in colDesc['role'] and colDesc['colName'] == 'start':
# start = colDesc['colName']
# if 'boundaryIndicator' in colDesc['role'] and colDesc['colName'] == 'end':
# end = colDesc['colName']
#
# if num_attribute != 1:
# raise Exception('Datasets with one column with attribute role supported (assumed to be filename).')
#
# return AudioDataset(filename = filename, start = start, end = end)
def extract_feats(inputs, inputsBoundaries, dir_name, fext_pipeline = None,
resampling_rate = None):
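    # Run each file listed in `inputs` through the feature-extraction pipeline and
    # collect one feature array per row (rows with empty filenames yield empty arrays).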
features = List()
i = 0
for idx, row in inputs.iterrows():
if row[0] == '':
features.append(np.array([]))
continue
filename = os.path.join(dir_name, row[0])
print(filename)
        file_csvdata = pd.read_csv(filename, index_col=0)
csvdata = List[ndarray]([file_csvdata], {
'schema': metadata_module.CONTAINER_SCHEMA_VERSION,
'structural_type': List[ndarray],
'dimension': {
'length': 1
}
})
last_output = csvdata
for fext_step in fext_pipeline:
product = fext_step.produce(inputs = last_output)
last_output = product.value
features.append(last_output[0])
        i += 1
return features
def pipeline(inputs, inputsBoundaries, dataset_path, dataset_schema,
fext_pipeline = None, proc_pipeline = None,
resampling_rate = None, train = False, train_targets = None,
fext_cacheFN = None):
#audio_dataset = parse_dataset(dataset_schema)
# generate or load cached features - curve fittings
audio_path = path.join(dataset_path, dataset_schema['dataResources'][0]['resPath'])
if fext_cacheFN is None or not os.path.isfile(fext_cacheFN):
segm_fittings = extract_feats(inputs, inputsBoundaries, audio_path,
fext_pipeline = fext_pipeline,
resampling_rate = resampling_rate)
if fext_cacheFN is not None:
with open(fext_cacheFN, 'wb') as fp:
pickle.dump(List([ List(x) for x in segm_fittings ]), fp)
if fext_cacheFN is not None:
with open(fext_cacheFN, 'rb') as fp:
segm_fittings = pickle.load(fp)
# process features - curve fittings
last_output = segm_fittings
for proc_step in proc_pipeline:
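        # In training mode, fit each learner step on the current features before
        # producing; pure transformer steps are applied without fitting.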
if train and not isinstance(proc_step, TransformerPrimitiveBase):
if isinstance(proc_step, UnsupervisedLearnerPrimitiveBase):
proc_step.set_training_data(inputs = last_output)
else:
proc_step.set_training_data(inputs = last_output,
outputs = train_targets)
proc_step.fit()
product = proc_step.produce(inputs = last_output)
last_output = product.value
return last_output
###############################################################################
############### MAIN #######################################
###############################################################################
parser = argparse.ArgumentParser(description='TA1 pipeline',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--cachedir', type=str, default=None,
help="directory to cache the extracted features")
parser.add_argument('--fext_wlen', type=float, default=200,
help="")
parser.add_argument('--fext_rel_wshift', type=float, default=1,
help="")
parser.add_argument('--fext_mfcc_ceps', type=int, default=3,
help="")
parser.add_argument('--fext_poly_deg', type=int, default=2,
help="")
parser.add_argument('--n_clusters', type=int, default=32,
help="")
parser.add_argument('--ngram', type=int, default=1,
help="")
parser.add_argument('--tfidf_norm', type=str, default='l2',
help="")
parser.add_argument('--tfidf_use_idf', type=int, default=1,
help="")
parser.add_argument('--tfidf_smooth_idf', type=int, default=1,
help="")
parser.add_argument('--tfidf_sublinear_tf', type=int, default=1,
help="")
parser.add_argument('--svc_penalty', type=str, default='l2',
help="")
parser.add_argument('--svc_loss', type=str, default='squared_hinge',
help="")
parser.add_argument('--svc_C', type=float, default=1,
help="")
args = parser.parse_args()
# Load the json configuration file
with open("ta1-pipeline-config.json", 'r') as inputFile:
jsonCall = json.load(inputFile)
inputFile.close()
# Load the problem description schema
with open( path.join(jsonCall['train_data'], 'problem_TRAIN', 'problemDoc.json' ) , 'r') as inputFile:
problemSchema = json.load(inputFile)
inputFile.close()
# Load the json dataset description file
trainDatasetPath = path.join(jsonCall['train_data'], 'dataset_TRAIN')
with open( path.join(trainDatasetPath, 'datasetDoc.json' ) , 'r') as inputFile:
datasetSchema = json.load(inputFile)
inputFile.close()
taskType = problemSchema['about']['taskType']
if taskType != supportedTaskType:
raise Exception('supported tasktype is %s, provided problem is of type %s' % (supportedTaskType, taskType))
taskSubType = problemSchema['about']['taskSubType']
if taskSubType != supportedTaskSubType:
    raise Exception('supported taskSubType is %s, provided problem is of type %s' % (supportedTaskSubType, taskSubType))
# Load the json dataset description file
with open( path.join(jsonCall['train_data'], 'dataset_TRAIN', 'datasetDoc.json' ) , 'r') as inputFile:
datasetSchema = json.load(inputFile)
inputFile.close()
if datasetSchema['dataResources'][0]['resType'] != supportedResType:
raise Exception('Supported resType is only %s' % (supportedResType))
# Get the target and attribute column ids from the dataset schema for training data
trainAttributesColumnIds = [ item['colIndex'] for item in datasetSchema['dataResources'][1]['columns'] if 'attribute' in item['role'] ]
boundariesColumnIds = [ item['colIndex'] for item in datasetSchema['dataResources'][1]['columns'] if 'boundaryIndicator' in item['role'] ]
trainTargetsColumnIds = [ item['colIndex'] for item in problemSchema['inputs']['data'][0]['targets'] ]
# Exit if more than one target
if len(trainAttributesColumnIds) != 1:
raise Exception('Only one attribute column expected, %d found in the problem. Exiting.' % (len(trainAttributesColumnIds)))
if len(trainTargetsColumnIds) != 1:
raise Exception('Only one target column expected, %d found in the problem. Exiting.' % (len(trainTargetsColumnIds)))
# Get the attribute column ids from the problem schema for test data (in this example, they are the same)
testAttributesColumnIds = trainAttributesColumnIds
# Load the tabular data file for training, replace missing values, and split it in train data and targets
trainDataResourcesPath = path.join(jsonCall['train_data'], 'dataset_TRAIN', datasetSchema['dataResources'][1]['resPath'])
#trainData = pd.read_csv( trainDataResourcesPath, header=0, usecols=trainAttributesColumnIds).fillna('0').replace('', '0')
#trainTargets = pd.read_csv( trainDataResourcesPath, header=0, usecols=trainTargetsColumnIds).fillna('0').replace('', '0')
trainData = pd.read_csv( trainDataResourcesPath, header=0, usecols=trainAttributesColumnIds).fillna('')
trainBoundaries = pd.read_csv( trainDataResourcesPath, header=0, usecols=boundariesColumnIds).fillna('')
trainTargets = pd.read_csv( trainDataResourcesPath, header=0, usecols=trainTargetsColumnIds).fillna('')
# Load the tabular data file for training, replace missing values, and split it in train data and targets
testDatasetPath = path.join(jsonCall['test_data'], 'dataset_TEST')
testDataResourcesPath = path.join(testDatasetPath, datasetSchema['dataResources'][1]['resPath'])
testData = pd.read_csv( testDataResourcesPath, header=0, usecols=testAttributesColumnIds).fillna('')
testBoundaries = pd.read_csv( testDataResourcesPath, header=0, usecols=boundariesColumnIds).fillna('')
# Get the d3mIndex of the testData
d3mIndex = pd.read_csv( testDataResourcesPath, header=0, usecols=['d3mIndex'])
# Encode the categorical data in training data
trainDataCatLabels = []
trainDataLabelEncoders = dict()
# Encode the categorical data in the test targets, uses the last target of the dataset as a target
trainTargetsCatLabel = ''
trainTargetsLabelEncoder = preprocessing.LabelEncoder()
for colDesc in datasetSchema['dataResources'][1]['columns']:
if colDesc['colType']=='categorical' and 'attribute' in colDesc['role']:
trainDataCatLabels.append(colDesc['colName'])
trainDataLabelEncoders[colDesc['colName']] = preprocessing.LabelEncoder().fit(trainData[colDesc['colName']])
trainData[colDesc['colName']] = trainDataLabelEncoders[colDesc['colName']].transform(trainData[colDesc['colName']])
elif colDesc['colType']=='categorical' and 'suggestedTarget' in colDesc['role']:
trainTargetsCatLabel = colDesc['colName']
trainTargetsLabelEncoder = trainTargetsLabelEncoder.fit(trainTargets[colDesc['colName']])
trainTargets = trainTargetsLabelEncoder.transform(trainTargets[colDesc['colName']])
# Train the model
# Build the feature extraction pipeline
resampling_rate = 1
channel_mixer = ChannelAverager(
hyperparams = ChannelAverager.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams'].defaults()
)
dither = SignalDither(
hyperparams = SignalDither.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams'].defaults()
)
framer_hyperparams = SignalFramer.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
framer_custom_hyperparams = dict()
framer_custom_hyperparams['sampling_rate'] = resampling_rate
if args.fext_wlen is not None:
framer_custom_hyperparams['frame_length_s'] = args.fext_wlen
if args.fext_rel_wshift is not None:
framer_custom_hyperparams['frame_shift_s'] = args.fext_rel_wshift*args.fext_wlen
framer = SignalFramer(
hyperparams = framer_hyperparams(
framer_hyperparams.defaults(), **framer_custom_hyperparams
)
)
mfcc_hyperparams = SignalMFCC.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
mfcc_custom_hyperparams = dict()
mfcc_custom_hyperparams['sampling_rate'] = resampling_rate
if args.fext_mfcc_ceps is not None:
mfcc_custom_hyperparams['num_ceps'] = args.fext_mfcc_ceps
mfcc = SignalMFCC(
hyperparams = mfcc_hyperparams(
mfcc_hyperparams.defaults(), **mfcc_custom_hyperparams
)
)
segm = UniformSegmentation(
hyperparams = UniformSegmentation.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams'].defaults()
)
segm_fitter_hyperparams = SegmentCurveFitter.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
segm_fitter_custom_hyperparams = dict()
if args.fext_poly_deg is not None:
segm_fitter_custom_hyperparams['deg'] = args.fext_poly_deg
segm_fitter = SegmentCurveFitter(
hyperparams = segm_fitter_hyperparams(
segm_fitter_hyperparams.defaults(), **segm_fitter_custom_hyperparams
)
)
fext_pipeline = [ channel_mixer, dither, framer, mfcc, segm, segm_fitter ]
print('Feature extraction pipeline:')
for fext_step in fext_pipeline:
print(fext_step.hyperparams)
# Build the classification pipeline
#clusterer = ClusterCurveFittingKMeans(hyperparams = ClusterCurveFittingKMeans.Hyperparams())
clusterer_hyperparams = ClusterCurveFittingKMeans.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
clusterer_custom_hyperparams = dict()
if args.n_clusters is not None:
clusterer_custom_hyperparams['n_clusters'] = args.n_clusters
clusterer = ClusterCurveFittingKMeans(
hyperparams = clusterer_hyperparams(
clusterer_hyperparams.defaults(), **clusterer_custom_hyperparams
)
)
fittings_framer_hyperparams = SignalFramer.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
fittings_framer_custom_hyperparams = {
'sampling_rate': 1, 'frame_shift_s': 1, 'flatten_output': False,
}
if args.ngram is not None:
fittings_framer_custom_hyperparams['frame_length_s'] = args.ngram
fittings_framer = SignalFramer(
hyperparams = fittings_framer_hyperparams(
fittings_framer_hyperparams.defaults(), **fittings_framer_custom_hyperparams
)
)
fittings_to_bot_hyperparams = SequenceToBagOfTokens.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
fittings_to_bot = SequenceToBagOfTokens(
hyperparams = fittings_to_bot_hyperparams(
fittings_to_bot_hyperparams.defaults()
)
)
tfidf_hyperparams = BBNTfidfTransformer.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
tfidf_custom_hyperparams = dict()
if args.tfidf_norm is not None:
tfidf_custom_hyperparams['norm'] = args.tfidf_norm
if args.tfidf_use_idf is not None:
tfidf_custom_hyperparams['use_idf'] = bool(args.tfidf_use_idf)
if args.tfidf_smooth_idf is not None:
tfidf_custom_hyperparams['smooth_idf'] = bool(args.tfidf_smooth_idf)
if args.tfidf_sublinear_tf is not None:
tfidf_custom_hyperparams['sublinear_tf'] = bool(args.tfidf_sublinear_tf)
tfidf = BBNTfidfTransformer(
hyperparams = tfidf_hyperparams(
tfidf_hyperparams.defaults(), **tfidf_custom_hyperparams
)
)
seq_modeler_hyperparams = SKLinearSVC.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
seq_modeler_custom_hyperparams = { 'dual': True }
if args.svc_penalty is not None:
seq_modeler_custom_hyperparams['penalty'] = args.svc_penalty
if args.svc_loss is not None:
seq_modeler_custom_hyperparams['loss'] = args.svc_loss
if args.svc_C is not None:
seq_modeler_custom_hyperparams['C'] = args.svc_C
seq_modeler = SKLinearSVC(
hyperparams = seq_modeler_hyperparams(
seq_modeler_hyperparams.defaults(), **seq_modeler_custom_hyperparams
)
)
proc_pipeline = [ clusterer, fittings_framer, fittings_to_bot, tfidf, seq_modeler ]
print('Classification pipeline:')
for proc_step in proc_pipeline:
print(proc_step.hyperparams)
#trainPredict = pipeline(trainData, clusterer, seq_modeler, train = True, train_targets = trainTargets)
#trainN = 3
trainN = len(trainData)
trainData = trainData[:trainN]
trainTargets = trainTargets[:trainN]
trainCacheFN = None #if args.cachedir is None else os.path.join(args.cachedir, 'fext_train.pkl')
trainPredict = pipeline(trainData, trainBoundaries, trainDatasetPath, datasetSchema,
fext_pipeline = fext_pipeline, proc_pipeline = proc_pipeline,
train = True, train_targets = trainTargets, resampling_rate = resampling_rate,
fext_cacheFN = trainCacheFN)
acc = np.mean(trainTargets == trainPredict)
print('Training accuracy: %f\n' % (acc))
confmat = sklearn.metrics.confusion_matrix(trainTargets, trainPredict)
print('Training confusion matrix: \n%s\n\n' % (confmat))
# Encode the testData using the previous label encoders
for colLabel in trainDataCatLabels:
testData[colLabel] = trainDataLabelEncoders[colLabel].transform(testData[colLabel])
# Predicts targets from the test data
#testN = 3
testN = len(testData)
testData = testData[:testN]
testCacheFN = None #if args.cachedir is None else os.path.join(args.cachedir, 'fext_test.pkl')
predictedTargets = pipeline(testData, testBoundaries, testDatasetPath, datasetSchema,
fext_pipeline = fext_pipeline, proc_pipeline = proc_pipeline,
train = False, resampling_rate = resampling_rate,
fext_cacheFN = testCacheFN)
# Reverse the label encoding for predicted targets
predictedTargets = trainTargetsLabelEncoder.inverse_transform(predictedTargets)
# Append the d3mindex column to the predicted targets
predictIndex = d3mIndex['d3mIndex'][:testN]
predictedTargets = pd.DataFrame({'d3mIndex':predictIndex, trainTargetsCatLabel:predictedTargets})
#predictedTargets = pd.DataFrame({'d3mIndex':d3mIndex['d3mIndex'], trainTargetsCatLabel:predictedTargets})
# Get the file path of the expected outputs
outputFilePath = path.join(jsonCall['output_folder'], problemSchema['expectedOutputs']['predictionsFile'])
# Outputs the predicted targets in the location specified in the JSON configuration file
with open(outputFilePath, 'w') as outputFile:
output = predictedTargets.to_csv(outputFile, index=False, columns=['d3mIndex', trainTargetsCatLabel])
|
the-stack_106_31656 | from statsmodels.compat.python import iteritems
from statsmodels.compat.pandas import assert_series_equal
from io import StringIO
import warnings
from statsmodels.formula.api import ols
from statsmodels.formula.formulatools import make_hypotheses_matrices
from statsmodels.tools import add_constant
from statsmodels.datasets.longley import load, load_pandas
from statsmodels.datasets import cpunish
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
import numpy as np
import pandas as pd
import patsy
import pytest
longley_formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
class CheckFormulaOLS(object):
@classmethod
def setup_class(cls):
cls.data = load(as_pandas=False)
def test_endog_names(self):
assert self.model.endog_names == 'TOTEMP'
def test_exog_names(self):
assert self.model.exog_names == ['Intercept', 'GNPDEFL', 'GNP',
'UNEMP', 'ARMED', 'POP', 'YEAR']
def test_design(self):
npt.assert_equal(self.model.exog,
add_constant(self.data.exog, prepend=True))
def test_endog(self):
npt.assert_equal(self.model.endog, self.data.endog)
@pytest.mark.smoke
def test_summary(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
"kurtosistest only valid for n>=20")
self.model.fit().summary()
class TestFormulaPandas(CheckFormulaOLS):
@classmethod
def setup_class(cls):
data = load_pandas().data
cls.model = ols(longley_formula, data)
super(TestFormulaPandas, cls).setup_class()
class TestFormulaDict(CheckFormulaOLS):
@classmethod
def setup_class(cls):
data = dict((k, v.tolist()) for k, v in iteritems(load_pandas().data))
cls.model = ols(longley_formula, data)
super(TestFormulaDict, cls).setup_class()
def test_tests():
formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
dta = load_pandas().data
results = ols(formula, dta).fit()
test_formula = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
LC = make_hypotheses_matrices(results, test_formula)
R = LC.coefs
Q = LC.constants
npt.assert_almost_equal(R, [[0, 1, -1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1./1829]], 8)
npt.assert_array_equal(Q, [[0], [2], [1]])
def test_formula_labels():
# make sure labels pass through patsy as expected
# data(Duncan) from car in R
dta = StringIO('"type","income","education","prestige"\n'
'"accountant","prof",62,86,82\n'
'"pilot","prof",72,76,83\n'
'"architect","prof",75,92,90\n'
'"author","prof",55,90,76\n'
'"chemist","prof",64,86,90\n'
'"minister","prof",21,84,87\n'
'"professor","prof",64,93,93\n'
'"dentist","prof",80,100,90\n'
'"reporter","wc",67,87,52\n'
'"engineer","prof",72,86,88\n'
'"undertaker","prof",42,74,57\n'
'"lawyer","prof",76,98,89\n'
'"physician","prof",76,97,97\n'
'"welfare.worker","prof",41,84,59\n'
'"teacher","prof",48,91,73\n'
'"conductor","wc",76,34,38\n'
'"contractor","prof",53,45,76\n'
'"factory.owner","prof",60,56,81\n'
'"store.manager","prof",42,44,45\n'
'"banker","prof",78,82,92\n'
'"bookkeeper","wc",29,72,39\n'
'"mail.carrier","wc",48,55,34\n'
'"insurance.agent","wc",55,71,41\n'
'"store.clerk","wc",29,50,16\n'
'"carpenter","bc",21,23,33\n'
'"electrician","bc",47,39,53\n'
'"RR.engineer","bc",81,28,67\n'
'"machinist","bc",36,32,57\n'
'"auto.repairman","bc",22,22,26\n'
'"plumber","bc",44,25,29\n'
'"gas.stn.attendant","bc",15,29,10\n'
'"coal.miner","bc",7,7,15\n'
'"streetcar.motorman","bc",42,26,19\n'
'"taxi.driver","bc",9,19,10\n'
'"truck.driver","bc",21,15,13\n'
'"machine.operator","bc",21,20,24\n'
'"barber","bc",16,26,20\n'
'"bartender","bc",16,28,7\n'
'"shoe.shiner","bc",9,17,3\n'
'"cook","bc",14,22,16\n'
'"soda.clerk","bc",12,30,6\n'
'"watchman","bc",17,25,11\n'
'"janitor","bc",7,20,8\n'
'"policeman","bc",34,47,41\n'
'"waiter","bc",8,32,10')
from pandas import read_csv
dta = read_csv(dta)
model = ols("prestige ~ income + education", dta).fit()
assert_equal(model.fittedvalues.index, dta.index)
def test_formula_predict():
# `log` is needed in the namespace for patsy to find
from numpy import log # noqa:F401
formula = """TOTEMP ~ log(GNPDEFL) + log(GNP) + UNEMP + ARMED +
POP + YEAR"""
data = load_pandas()
dta = load_pandas().data
results = ols(formula, dta).fit()
npt.assert_almost_equal(results.fittedvalues.values,
results.predict(data.exog), 8)
def test_formula_predict_series():
data = pd.DataFrame({"y": [1, 2, 3], "x": [1, 2, 3]}, index=[5, 3, 1])
results = ols('y ~ x', data).fit()
result = results.predict(data)
expected = pd.Series([1., 2., 3.], index=[5, 3, 1])
assert_series_equal(result, expected)
result = results.predict(data.x)
assert_series_equal(result, expected)
result = results.predict(pd.Series([1, 2, 3], index=[1, 2, 3], name='x'))
expected = pd.Series([1., 2., 3.], index=[1, 2, 3])
assert_series_equal(result, expected)
result = results.predict({"x": [1, 2, 3]})
expected = pd.Series([1., 2., 3.], index=[0, 1, 2])
assert_series_equal(result, expected)
def test_patsy_lazy_dict():
class LazyDict(dict):
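        # dict subclass whose __missing__ serves columns lazily as numpy arrays
        # when patsy looks them up.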
def __init__(self, data):
self.data = data
def __missing__(self, key):
return np.array(self.data[key])
data = cpunish.load_pandas().data
data = LazyDict(data)
res = ols('EXECUTIONS ~ SOUTH + INCOME', data=data).fit()
res2 = res.predict(data)
npt.assert_allclose(res.fittedvalues, res2)
data = cpunish.load_pandas().data
data['INCOME'].loc[0] = None
data = LazyDict(data)
data.index = cpunish.load_pandas().data.index
res = ols('EXECUTIONS ~ SOUTH + INCOME', data=data).fit()
res2 = res.predict(data)
assert_equal(res.fittedvalues, res2) # Should lose a record
assert_equal(len(res2) + 1, len(cpunish.load_pandas().data))
def test_patsy_missing_data():
# Test pandas-style first
data = cpunish.load_pandas().data
data['INCOME'].loc[0] = None
res = ols('EXECUTIONS ~ SOUTH + INCOME', data=data).fit()
res2 = res.predict(data)
# First record will be dropped during fit, but not during predict
assert_equal(res.fittedvalues, res2[1:])
# Non-pandas version
data = cpunish.load_pandas().data
data['INCOME'].loc[0] = None
data = data.to_records(index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
res2 = res.predict(data)
assert 'ValueWarning' in repr(w[-1].message)
assert 'nan values have been dropped' in repr(w[-1].message)
    # First record will be dropped in both cases
assert_equal(res.fittedvalues, res2)
def test_predict_nondataframe():
df = pd.DataFrame([[3, 0.030], [10, 0.060], [20, 0.120]],
columns=['BSA', 'Absorbance'])
model = ols('Absorbance ~ BSA', data=df)
fit = model.fit()
error = patsy.PatsyError
with pytest.raises(error):
fit.predict([0.25])
def test_formula_environment():
df = pd.DataFrame({'x': [1, 2, 3], 'y': [2, 4, 6]})
env = patsy.EvalEnvironment([{'z': [3, 6, 9]}])
model = ols('y ~ x + z', eval_env=env, data=df)
assert 'z' in model.exog_names
with pytest.raises(TypeError):
ols('y ~ x', eval_env='env', data=df)
|
the-stack_106_31657 | from collections import defaultdict, deque
class IntComputer:
class Halt(Exception):
pass
def __init__(self, mem, inputs=()):
self.pc = 0
self.rb = 0
self.mem = defaultdict(int, ((i, v) for i, v in enumerate(mem)))
self.input = deque(inputs)
self.output = deque()
self.__last_op = None
self.ops = {
1: self.op_add,
2: self.op_mul,
3: self.op_input,
4: self.op_output,
5: self.op_jump_true,
6: self.op_jump_false,
7: self.op_lt,
8: self.op_eq,
9: self.op_inc_rb,
99: self.op_halt,
}
def op_add(self, x, y, z):
self.mem[z] = self.mem[x] + self.mem[y]
def op_mul(self, x, y, z):
self.mem[z] = self.mem[x] * self.mem[y]
def op_input(self, x):
self.mem[x] = self.input.popleft()
def op_output(self, x):
self.output.append(self.mem[x])
def op_jump_true(self, x, y):
if self.mem[x]:
self.pc = self.mem[y]
def op_jump_false(self, x, y):
if not self.mem[x]:
self.pc = self.mem[y]
def op_lt(self, x, y, z):
self.mem[z] = int(self.mem[x] < self.mem[y])
def op_eq(self, x, y, z):
self.mem[z] = int(self.mem[x] == self.mem[y])
def op_inc_rb(self, x):
self.rb += self.mem[x]
def op_halt(self):
raise IntComputer.Halt
def step(self):
opcode = self.mem[self.pc]
self.pc += 1
op = self.ops[opcode % 100]
nargs = op.__code__.co_argcount - 1
modes = str(opcode // 100).zfill(nargs)[::-1]
args = []
for i in range(nargs):
if modes[i] == '0':
args.append(self.mem[self.pc])
elif modes[i] == '1':
args.append(self.pc)
elif modes[i] == '2':
args.append(self.mem[self.pc] + self.rb)
else:
raise Exception(f"invalid mode '{modes[i]}'")
self.pc += 1
self.__last_op = op
op(*args)
def run(self, until=op_halt):
while True:
try:
self.step()
except IntComputer.Halt:
if until is not IntComputer.op_halt:
raise IntComputer.Halt
if self.__last_op.__func__ is until:
return self.output
def reset(self):
self.pc = 0
        self.rb = 0
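# Minimal usage sketch (illustrative program, not part of the original file):
# opcode 1101 adds its two immediate operands (2 and 3) into position 5,
# opcode 4 then outputs that cell, and 99 halts, so run() returns deque([5]).
if __name__ == "__main__":
    demo = IntComputer([1101, 2, 3, 5, 4, 5, 99])
    print(demo.run())  # deque([5])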
|
the-stack_106_31658 | #-------------------------------------------------------------------------------
# Name: Subscription Counter
# Purpose: Calculate the number of subscriptions made each month.
#
# Author: Naseela Amboorallee
#
# Created: 13/02/2018
# Copyright: (c) Naseela Amboorallee 2018
# Licence: <your licence>
#-------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import numpy as np
# Create a data frame from the csv file.
df = pd.read_csv("RSE_members.csv")
def plotgraph(df_plot,file,title,xvalue,yvalue):
"""
    Create and export a plot of the dataframe.
:params: dataframe, file name, graph title, data frame columns
:return: a png file of the plotted graph
"""
    # Create the variables needed for plotting.
fig, ax = plt.subplots()
x = df_plot[xvalue]
y = df_plot[yvalue]
# Create a new list with the desired tick labels.
xticklist = x.tolist()
print(xticklist)
xticklist = xticklist[0::6]
print(xticklist)
    # Plot the data as a line graph.
ax.plot(x,y)
# Auto format the date labels on the x axis.
fig.autofmt_xdate()
# Set the title and labels for the graph.
ax.set_title(title)
ax.set_xlabel(xvalue)
ax.set_ylabel(yvalue)
    # Set major and minor ticks for the x axis.
start, end = ax.get_xlim()
major_ticks = np.arange(start+2, end,6)
minor_ticks = np.arange(start,end)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
# Set the tick labels as the list with the desired scale.
ax.set_xticklabels(xticklist)
# Ensure the layout displays correctly.
plt.tight_layout()
# Export to png file.
plt.savefig(file+".png")
# Format the date column
# Create a new column where the dates contain only the month and year.
# Map a lambda which removes the preceding numerical values from the sub dates.
df["Sub month"]=df["Sub date"].map(lambda x: x.lstrip('0123456789').rstrip())
# Convert string into datetime data type then format and overwrite column.
df["Month"]=pd.to_datetime(df["Sub month"])
df["Month"]=df["Month"].dt.strftime('%Y/%m')
# Count the subscriptions per month.
# Group by month and create a new column from the number of times each group appears.
df["Subscriptions"] = df.groupby(df["Month"])["Month"].transform('count')
# Removes all duplicate rows from the data frame.
df.drop_duplicates(subset="Month", inplace=True)
# Create a new data frame containing only the necessary columns.
subdf = df[["Month","Subscriptions"]].copy()
print (subdf)
# Export the new data frame into a csv file.
subdf.to_csv('Subscription_count.csv')
# Plot the count graph.
plotgraph(subdf,"Subscription_count","Subscriptions per Month","Month", "Subscriptions")
# Create a new column for the cumulative sum in the sub data frame.
subdf["Total Subscribers"]= subdf["Subscriptions"].cumsum()
# Create a new data frame from the sub data frame with desire columns.
cumdf = subdf[["Month","Total Subscribers"]].copy()
print(cumdf)
# Plot the cumulative sum graph.
plotgraph(cumdf,"Total_subs","Total Subscribers","Month","Total Subscribers") |
the-stack_106_31659 | import re
valid_re = re.compile('^[\w-]+$')
def validate_feed_id(feed_id):
'''
Validates the input is in the format of user:1
:param feed_id: a feed such as user:1
    Raises ValueError if the format doesn't match
'''
feed_id = str(feed_id)
if len(feed_id.split(':')) != 2:
msg = 'Invalid feed_id spec %s, please specify the feed_id as feed_slug:feed_id'
raise ValueError(msg % feed_id)
feed_slug, user_id = feed_id.split(':')
feed_slug = validate_feed_slug(feed_slug)
user_id = validate_user_id(user_id)
return feed_id
def validate_feed_slug(feed_slug):
'''
Validates the feed slug falls into \w
'''
feed_slug = str(feed_slug)
if not valid_re.match(feed_slug):
msg = 'Invalid feed slug %s, please only use letters, numbers and _'
raise ValueError(msg % feed_slug)
return feed_slug
def validate_user_id(user_id):
'''
Validates the user id falls into \w
'''
user_id = str(user_id)
if not valid_re.match(user_id):
msg = 'Invalid user id %s, please only use letters, numbers and _'
raise ValueError(msg % user_id)
return user_id
def validate_foreign_id_time(foreign_id_time):
if not isinstance(foreign_id_time, (list, tuple)):
raise ValueError('foreign_id_time should be a list of tuples')
for v in foreign_id_time:
if not isinstance(v, (list, tuple)):
raise ValueError('foreign_id_time elements should be lists or tuples')
if len(v) != 2:
raise ValueError('foreign_id_time elements should have two elements')
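# Usage sketch (illustrative values, not part of the original module):
#
#     validate_feed_id('user:1')        # -> 'user:1'
#     validate_feed_slug('timeline')    # -> 'timeline'
#     validate_user_id('bogus id!')     # raises ValueError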
|
the-stack_106_31660 | #!/usr/bin/env python3
"""Acquisition script for HP4194A Impedance Analyzer"""
import argparse
import configparser
import datetime
import os
import subprocess
import sys
import numpy
import pylab
import pyvisa
import scipy.io as scio
import matplotlib.pyplot as pyplot
DEBUG = False
FILE_EXT = '.mat'
def main(filename):
"""Acquire and plot/save data."""
r = subprocess.run('git describe --tags --always', shell=True,
stdout=subprocess.PIPE)
program_version = r.stdout.strip().decode()
def to_tuple(s):
return [int(float(v)) for v in s.split(',')]
parser = configparser.ConfigParser()
parser.read('hp4194a.ini')
setup_section = parser['setup']
resource_name = setup_section.get('resource_name')
gpib_address = int(setup_section.get('gpib_address'))
sweep_section = parser['sweep']
start_frequency = int(float(sweep_section.get('start_frequency')))
stop_frequency = int(float(sweep_section.get('stop_frequency')))
number_of_points = int(sweep_section.get('number_of_points'))
number_of_averages = int(sweep_section.get('number_of_averages'))
display_range_a = to_tuple(sweep_section.get('display_range_a'))
display_range_b = to_tuple(sweep_section.get('display_range_b'))
bias_voltage = int(sweep_section.get('bias_voltage'))
rm = pyvisa.ResourceManager('@py')
inst = rm.open_resource(resource_name)
inst.timeout = 10000
inst.write('++mode 1') # Configure as controller
inst.write('++auto 1') # Configure read-after-write
inst.write('++addr %d' % gpib_address)
inst.write('++clr')
print(inst.query('++ver'))
inst.write('IMP2') # R-X
inst.write('ITM2') # Integration time medium
inst.write(f'START={start_frequency}')
inst.write(f'STOP={stop_frequency}')
inst.write(f'AMIN={display_range_a[0]}')
inst.write(f'AMAX={display_range_a[1]}')
inst.write(f'BMIN={display_range_b[0]}')
inst.write(f'BMAX={display_range_b[1]}')
inst.write(f'NOP={number_of_points}')
inst.write(f'NOA={number_of_averages}')
inst.write('SHT1') # Short compensation on
    inst.write('OPN1') # Open compensation on
inst.write(f'BIAS={bias_voltage}')
inst.write('RQS2')
inst.write('SWM2') # Single sweep
inst.write('SWTRG') # Trigger acquisition
inst.write('CMT"Acquiring sweep"')
sweep_finished = False
print("Acquiring sweep")
while not sweep_finished:
polled = inst.query('++spoll')
if polled:
try:
status_byte = int(polled)
except (ValueError, IndexError):
print("Serial poll returned unexpected value: {}"
.format(polled))
break
if DEBUG:
print("{:08b}".format(status_byte))
sweep_finished = not status_byte & 0x01
print("Acquisition complete")
inst.write('DCOFF') # Bias off
a = inst.query_ascii_values('A?', container=numpy.array)
b = inst.query_ascii_values('B?', container=numpy.array)
save = True
    if len(a) != number_of_points:
        print("Number of points transferred from Channel A: %d" % len(a))
        save = False
    if len(b) != number_of_points:
        print("Number of points transferred from Channel B: %d" % len(b))
        save = False
if save:
scio.savemat(filename, {
'time': datetime.datetime.now().isoformat(),
'acqProgramVersion': program_version,
'biasVoltage': bias_voltage,
'numberOfAverages': number_of_averages,
'Frequency': (start_frequency, stop_frequency),
'ChannelA': a,
'ChannelB': b,
})
# Strip .mat when printing
inst.write(f'CMT"Saved as {os.path.basename(filename[:-4])}"')
print(f"Data saved to {filename}")
t = pylab.linspace(start_frequency, stop_frequency,
number_of_points)
plotyy(t, a, b, display_range_a, display_range_b)
else:
print("No data saved")
inst.close()
rm.close()
def plotyy(t, y1, y2, y1lim, y2lim):
"""Plot data with two y-axes."""
t /= 1e3 # Hz -> kHz
fig, ax1 = pyplot.subplots()
color = 'tab:orange'
ax1.set_xlabel('Frequency [kHz]')
ax1.set_ylabel('R', color=color)
ax1.set_xlim(t[0], t[-1])
ax1.set_ylim(y1lim)
ax1.tick_params(axis='y', labelcolor=color)
ax1.plot(t, y1, color=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('X', color=color)
ax2.set_xlim(t[0], t[-1])
ax2.set_ylim(y2lim)
ax2.tick_params(axis='y', labelcolor=color)
ax2.plot(t, y2, color=color)
fig.tight_layout() # otherwise the right y-label is slightly clipped
pyplot.show()
return fig, ax1, ax2
def default_filename():
"""Create ISO8601 timestamp as default filename
The format is: YYYYMMDDTHHMMSS
"""
now = datetime.datetime.now().isoformat()
return now.replace('-', '').replace(':', '').split('.')[0]
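# For example (illustrative): a clock reading of 2024-03-05 14:30:07.123456
# becomes "20240305T143007".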
def parse_args():
"""Parse command line arguments."""
default = default_filename()
parser = argparse.ArgumentParser(description='HP4194A acquisition script')
parser.add_argument('filename', nargs='?')
args = parser.parse_args()
if args.filename:
filename = args.filename
else:
filename = input(f"Enter a filepath or press [ENTER] to accept the "
f"default ({default}.mat):") or default
if not filename.endswith(FILE_EXT):
filename += FILE_EXT
if os.path.exists(filename):
resp = input(f"File {filename} exists. Are you sure you want "
f"to overwrite it (y/n)?")
if resp.lower() != 'y':
sys.exit(0)
return filename
if __name__ == '__main__':
main(parse_args())
|
the-stack_106_31661 | import threading
import os
import time
import codecs
import requests
import json
from ecdsa import SigningKey, SECP256k1
import sha3
import traceback
def getAddress(phrases):
keyList = []
addrList = []
addrStr = ""
try:
for phrase in phrases:
key = sha3.keccak_256(phrase.encode("utf-8")).hexdigest()
priv = codecs.decode(key, 'hex_codec')
pub = SigningKey.from_string(priv, curve=SECP256k1).get_verifying_key().to_string()
addr = "0x" + sha3.keccak_256(pub).hexdigest()[24:]
keyList.append(key)
addrList.append(addr)
if len(addrStr): addrStr = addrStr + ","
addrStr = addrStr + addr
except:
pass
return [keyList, addrList, addrStr]
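# Derivation sketch: each private key is keccak256(phrase), the public key is
# the raw 64-byte uncompressed SECP256k1 point, and the address is '0x' plus
# the last 20 bytes of keccak256(public key). Illustrative call (outputs
# depend on the phrase and are not reproduced here):
#
#     keys, addrs, addr_str = getAddress(["some brainwallet phrase"])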
def getBalances(addrStr):
balances = ""
try:
r = requests.get(url='https://etherchain.org/api/account/multiple/%s' % addrStr, timeout=5)
balances = r.text
except:
return
try:
balances = json.loads(balances)
if balances['status'] != 1: raise Exception("API Busy")
balances = balances['data']
except:
print (balances)
return balances
getCount = 0
fp_dict = open("dict.txt", "r")
#fp_found = open("found.txt", "w+")
#fp_fund = open("fund.txt", "w+")
def getWallet():
global getCount
while True:
phrases = []
try:
for i in range(50):
readStr = fp_dict.readline().replace("\r","").replace("\n","")
if not len(readStr): break
phrases.append(readStr)
except:
pass
if len(phrases) <= 0: break
addressRet = getAddress(phrases)
getCount = getCount + len(phrases)
try:
balancesRet = getBalances(addressRet[2])
for balance in balancesRet:
key = ""
for i in range(0, len(addressRet[1])):
if balance['address'] == addressRet[1][i]:
key = addressRet[0][i]
break
if key == "": continue
#fp_found.write(str(balance['balance']) + " " + key + " " + balance['address'] + "\n")
#if balance['balance'] > 0:
#fp_fund.write(str(balance['balance']) + " " + key + " " + balance['address'] + "\n")
print (balance['balance'], key, balance['address'])
#fp_found.flush()
#fp_fund.flush()
except:
traceback.print_exc()
break
clearScreen()
print (getCount)
break
def clearScreen():
os.system('clear')
def main():
threads = []
for i in range(1):
threads.append(threading.Thread(target=getWallet,args=()))
for t in threads:
time.sleep(1.0)
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
main()
|
the-stack_106_31663 | """
Unit tests for Random CP optimiser on Cartesian product domains.
-- [email protected]
"""
# pylint: disable=invalid-name
# pylint: disable=abstract-class-little-used
# Local imports
from demos_synthetic.multiobjective_park.multiobjective_park import objectives as moo_park
from demos_synthetic.multiobjective_hartmann.multiobjective_hartmann \
import objectives as moo_hartmann
from demos_synthetic.multiobjective_branin_currinexp.multiobjective_branin_currinexp \
import objectives as moo_branin
from ..exd.cp_domain_utils import get_raw_point_from_processed_point, \
load_config_file
from ..exd.experiment_caller import get_multifunction_caller_from_config
from ..exd.worker_manager import SyntheticWorkerManager
from . import random_multiobjective_optimiser
from ..utils.base_test_class import BaseTestClass, execute_tests
from ..utils.reporters import get_reporter
class CPMultiObjectiveOptimiserBaseTestCase(object):
""" Base test class for optimisers on Cartesian product spaces. """
# pylint: disable=no-member
def setUp(self):
""" Set up. """
self.max_capital = 20
self._child_set_up()
self.worker_manager_1 = SyntheticWorkerManager(1, time_distro='const')
self.worker_manager_3 = SyntheticWorkerManager(3, time_distro='halfnormal')
self.opt_problems = [
('demos_synthetic/multiobjective_branin_currinexp/config.json', (moo_branin,)),
('demos_synthetic/multiobjective_hartmann/config.json', (moo_hartmann,)),
('demos_synthetic/multiobjective_park/config.json', (moo_park,)),
]
def _child_set_up(self):
""" Child set up. """
pass
@classmethod
def _child_instantiate_optimiser(cls, func_caller, worker_manager, options, reporter):
""" Instantiate the optimiser. """
raise NotImplementedError('Implement in a child class.')
@classmethod
def _run_optimiser(cls, raw_funcs, domain_config_file, worker_manager, max_capital,
mode, *args, **kwargs):
""" Run the optimiser from given args. """
raise NotImplementedError('Implement in a child class.')
def test_instantiation(self):
""" Tests instantiation of the optimiser. """
self.report('Test instantiation of multi-objective optimiser.')
for idx, (dcf, (raw_prob_funcs, )) in enumerate(self.opt_problems):
self.report('[%d/%d] Testing instantiation of optimiser for %s.'%(
idx + 1, len(self.opt_problems), dcf), 'test_result')
config = load_config_file(dcf)
multi_func_caller = get_multifunction_caller_from_config(raw_prob_funcs, config)
optimiser = self._child_instantiate_optimiser(
multi_func_caller, self.worker_manager_3, options=None,
reporter=get_reporter('silent'))
self.report('Instantiated %s object.'%(type(optimiser)))
for attr in dir(optimiser):
if not attr.startswith('_'):
self.report('optimiser.%s = %s'%(attr, str(getattr(optimiser, attr))),
'test_result')
def _test_optimiser_results(self, raw_prob_funcs, pareto_vals, pareto_points,
history, dcf):
""" Tests optimiser results. """
config = load_config_file(dcf)
multi_func_caller = get_multifunction_caller_from_config(raw_prob_funcs, config)
raw_pareto_points = [get_raw_point_from_processed_point(pop, config.domain,
config.domain_orderings.index_ordering,
config.domain_orderings.dim_ordering)
for pop in pareto_points]
self.report('Pareto opt point [-1]: proc=%s, raw=%s.'%(pareto_points[-1],
raw_pareto_points[-1]))
saved_in_history = [key for key, _ in list(history.__dict__.items()) if not
key.startswith('__')]
self.report('Stored in history: %s.'%(saved_in_history), 'test_result')
assert len(history.curr_pareto_vals) == len(history.curr_pareto_points)
for val in pareto_vals:
assert len(val) == multi_func_caller.num_funcs
for pt in pareto_points:
assert len(pt) == config.domain.num_domains
self.report('Pareto optimal points: %s.'%(pareto_points))
self.report('Pareto optimal values: %s.'%(pareto_vals))
def test_optimisation_single(self):
""" Test optimisation with a single worker. """
self.report('')
self.report('Testing %s with one worker.'%(type(self)))
for idx, (dcf, (raw_prob_funcs, )) in enumerate(self.opt_problems):
self.report('[%d/%d] Testing optimisation with 1 worker on %s.'%(
idx + 1, len(self.opt_problems), dcf), 'test_result')
self.worker_manager_1.reset()
pareto_vals, pareto_points, history = self._run_optimiser(raw_prob_funcs, dcf,
self.worker_manager_1, self.max_capital, 'asy')
self._test_optimiser_results(raw_prob_funcs, pareto_vals, pareto_points, history,
dcf)
self.report('')
def test_optimisation_asynchronous(self):
""" Testing random optimiser with three asynchronous workers. """
self.report('')
self.report('Testing %s with three asynchronous workers.'%(type(self)))
for idx, (dcf, (raw_prob_funcs, )) in enumerate(self.opt_problems):
self.report('[%d/%d] Testing optimisation with 3 asynchronous workers on %s.'%(
idx + 1, len(self.opt_problems), dcf), 'test_result')
self.worker_manager_3.reset()
pareto_vals, pareto_points, history = self._run_optimiser(raw_prob_funcs, dcf,
self.worker_manager_3, self.max_capital, 'asy')
self._test_optimiser_results(raw_prob_funcs, pareto_vals, pareto_points, history,
dcf)
self.report('')
class CPRandomMultiObjectiveOptimiserTestCase(
CPMultiObjectiveOptimiserBaseTestCase, BaseTestClass):
""" Unit tests for random multi-objective optimisation. """
@classmethod
def _child_instantiate_optimiser(cls, multi_func_caller, worker_manager, options,
reporter):
""" Instantiate optimiser. """
return random_multiobjective_optimiser.CPRandomMultiObjectiveOptimiser(
multi_func_caller, worker_manager, options, reporter)
@classmethod
def _run_optimiser(cls, raw_prob_funcs, domain_config_file, worker_manager, max_capital,
mode, *args, **kwargs):
""" Runs multi-objective optimiser. """
rmoo = random_multiobjective_optimiser
return rmoo.cp_random_multiobjective_optimisation_from_raw_args(raw_prob_funcs,
domain_config_file, worker_manager, max_capital, mode, *args, **kwargs)
if __name__ == '__main__':
execute_tests()
|
the-stack_106_31668 | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Disnake Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, TypeVar, Union
import discord.abc
from .asset import Asset
from .colour import Colour
from .enums import DefaultAvatar
from .flags import PublicUserFlags
from .utils import MISSING, _bytes_to_base64_data, snowflake_time
if TYPE_CHECKING:
from datetime import datetime
from .channel import DMChannel
from .guild import Guild
from .message import Message
from .state import ConnectionState
from .types.channel import DMChannel as DMChannelPayload
from .types.user import PartialUser as PartialUserPayload, User as UserPayload
__all__ = (
"User",
"ClientUser",
)
BU = TypeVar("BU", bound="BaseUser")
class _UserTag:
__slots__ = ()
id: int
class BaseUser(_UserTag):
__slots__ = (
"name",
"id",
"discriminator",
"_avatar",
"_banner",
"_accent_colour",
"bot",
"system",
"_public_flags",
"_state",
)
if TYPE_CHECKING:
name: str
id: int
discriminator: str
bot: bool
system: bool
_state: ConnectionState
_avatar: Optional[str]
_banner: Optional[str]
_accent_colour: Optional[str]
_public_flags: int
def __init__(
self, *, state: ConnectionState, data: Union[UserPayload, PartialUserPayload]
) -> None:
self._state = state
self._update(data)
def __repr__(self) -> str:
return (
f"<BaseUser id={self.id} name={self.name!r} discriminator={self.discriminator!r}"
f" bot={self.bot} system={self.system}>"
)
def __str__(self) -> str:
return f"{self.name}#{self.discriminator}"
def __eq__(self, other: Any) -> bool:
return isinstance(other, _UserTag) and other.id == self.id
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
return self.id >> 22
def _update(self, data: Union[UserPayload, PartialUserPayload]) -> None:
self.name = data["username"]
self.id = int(data["id"])
self.discriminator = data["discriminator"]
self._avatar = data["avatar"]
self._banner = data.get("banner", None)
self._accent_colour = data.get("accent_color", None)
self._public_flags = data.get("public_flags", 0)
self.bot = data.get("bot", False)
self.system = data.get("system", False)
@classmethod
def _copy(cls: Type[BU], user: BU) -> BU:
self = cls.__new__(cls) # bypass __init__
self.name = user.name
self.id = user.id
self.discriminator = user.discriminator
self._avatar = user._avatar
self._banner = user._banner
self._accent_colour = user._accent_colour
self.bot = user.bot
self._state = user._state
self._public_flags = user._public_flags
return self
def _to_minimal_user_json(self) -> Dict[str, Any]:
return {
"username": self.name,
"id": self.id,
"avatar": self._avatar,
"discriminator": self.discriminator,
"bot": self.bot,
}
@property
def public_flags(self) -> PublicUserFlags:
""":class:`PublicUserFlags`: The publicly available flags the user has."""
return PublicUserFlags._from_value(self._public_flags)
@property
def avatar(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns an :class:`Asset` for the avatar the user has.
If the user does not have a traditional avatar, ``None`` is returned.
If you want the avatar that a user has displayed, consider :attr:`display_avatar`.
"""
if self._avatar is not None:
return Asset._from_avatar(self._state, self.id, self._avatar)
return None
@property
def default_avatar(self) -> Asset:
""":class:`Asset`: Returns the default avatar for a given user. This is calculated by the user's discriminator."""
return Asset._from_default_avatar(self._state, int(self.discriminator) % len(DefaultAvatar))
@property
def display_avatar(self) -> Asset:
""":class:`Asset`: Returns the user's display avatar.
For regular users this is just their default avatar or uploaded avatar.
.. versionadded:: 2.0
"""
return self.avatar or self.default_avatar
@property
def banner(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the user's banner asset, if available.
.. versionadded:: 2.0
.. note::
This information is only available via :meth:`Client.fetch_user`.
"""
if self._banner is None:
return None
return Asset._from_banner(self._state, self.id, self._banner)
@property
def accent_colour(self) -> Optional[Colour]:
"""Optional[:class:`Colour`]: Returns the user's accent colour, if applicable.
There is an alias for this named :attr:`accent_color`.
.. versionadded:: 2.0
.. note::
This information is only available via :meth:`Client.fetch_user`.
"""
if self._accent_colour is None:
return None
return Colour(int(self._accent_colour))
@property
def accent_color(self) -> Optional[Colour]:
"""Optional[:class:`Colour`]: Returns the user's accent color, if applicable.
There is an alias for this named :attr:`accent_colour`.
.. versionadded:: 2.0
.. note::
This information is only available via :meth:`Client.fetch_user`.
"""
return self.accent_colour
@property
def colour(self) -> Colour:
""":class:`Colour`: A property that returns a colour denoting the rendered colour
for the user. This always returns :meth:`Colour.default`.
There is an alias for this named :attr:`color`.
"""
return Colour.default()
@property
def color(self) -> Colour:
""":class:`Colour`: A property that returns a color denoting the rendered color
for the user. This always returns :meth:`Colour.default`.
There is an alias for this named :attr:`colour`.
"""
return self.colour
@property
def mention(self) -> str:
""":class:`str`: Returns a string that allows you to mention the given user."""
return f"<@{self.id}>"
@property
def created_at(self) -> datetime:
""":class:`datetime.datetime`: Returns the user's creation time in UTC.
This is when the user's Discord account was created.
"""
return snowflake_time(self.id)
@property
def display_name(self) -> str:
""":class:`str`: Returns the user's display name.
For regular users this is just their username, but
if they have a guild specific nickname then that
is returned instead.
"""
return self.name
def mentioned_in(self, message: Message) -> bool:
"""Checks if the user is mentioned in the specified message.
Parameters
-----------
message: :class:`Message`
The message to check if you're mentioned in.
Returns
-------
:class:`bool`
Indicates if the user is mentioned in the message.
"""
if message.mention_everyone:
return True
return any(user.id == self.id for user in message.mentions)
class ClientUser(BaseUser):
"""Represents your Discord user.
.. container:: operations
.. describe:: x == y
Checks if two users are equal.
.. describe:: x != y
Checks if two users are not equal.
.. describe:: hash(x)
Return the user's hash.
.. describe:: str(x)
Returns the user's name with discriminator.
Attributes
-----------
name: :class:`str`
The user's username.
id: :class:`int`
The user's unique ID.
discriminator: :class:`str`
The user's discriminator. This is given when the username has conflicts.
bot: :class:`bool`
Specifies if the user is a bot account.
system: :class:`bool`
Specifies if the user is a system user (i.e. represents Discord officially).
.. versionadded:: 1.3
verified: :class:`bool`
Specifies if the user's email is verified.
locale: Optional[:class:`str`]
The IETF language tag used to identify the language the user is using.
mfa_enabled: :class:`bool`
Specifies if the user has MFA turned on and working.
"""
__slots__ = ("locale", "_flags", "verified", "mfa_enabled", "__weakref__")
if TYPE_CHECKING:
verified: bool
locale: Optional[str]
mfa_enabled: bool
_flags: int
def __init__(self, *, state: ConnectionState, data: UserPayload) -> None:
super().__init__(state=state, data=data)
def __repr__(self) -> str:
return (
f"<ClientUser id={self.id} name={self.name!r} discriminator={self.discriminator!r}"
f" bot={self.bot} verified={self.verified} mfa_enabled={self.mfa_enabled}>"
)
def _update(self, data: UserPayload) -> None:
super()._update(data)
# There's actually an Optional[str] phone field as well but I won't use it
self.verified = data.get("verified", False)
self.locale = data.get("locale")
self._flags = data.get("flags", 0)
self.mfa_enabled = data.get("mfa_enabled", False)
async def edit(self, *, username: str = MISSING, avatar: bytes = MISSING) -> ClientUser:
"""|coro|
Edits the current profile of the client.
.. note::
To upload an avatar, a :term:`py:bytes-like object` must be passed in that
represents the image being uploaded. If this is done through a file
then the file must be opened via ``open('some_filename', 'rb')`` and
the :term:`py:bytes-like object` is given through the use of ``fp.read()``.
The only image formats supported for uploading is JPEG and PNG.
.. versionchanged:: 2.0
The edit is no longer in-place, instead the newly edited client user is returned.
Parameters
-----------
username: :class:`str`
The new username you wish to change to.
avatar: :class:`bytes`
A :term:`py:bytes-like object` representing the image to upload.
Could be ``None`` to denote no avatar.
Raises
------
HTTPException
Editing your profile failed.
InvalidArgument
Wrong image format passed for ``avatar``.
Returns
---------
:class:`ClientUser`
The newly edited client user.
"""
payload: Dict[str, Any] = {}
if username is not MISSING:
payload["username"] = username
if avatar is not MISSING:
payload["avatar"] = _bytes_to_base64_data(avatar)
data: UserPayload = await self._state.http.edit_profile(payload)
return ClientUser(state=self._state, data=data)
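# Usage sketch for ClientUser.edit (hypothetical bot code; assumes `client` is
# a connected Client and "new_avatar.png" exists on disk):
#
#     with open("new_avatar.png", "rb") as fp:
#         await client.user.edit(username="NewName", avatar=fp.read())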
class User(BaseUser, discord.abc.Messageable):
"""Represents a Discord user.
.. container:: operations
.. describe:: x == y
Checks if two users are equal.
.. describe:: x != y
Checks if two users are not equal.
.. describe:: hash(x)
Return the user's hash.
.. describe:: str(x)
Returns the user's name with discriminator.
Attributes
-----------
name: :class:`str`
The user's username.
id: :class:`int`
The user's unique ID.
discriminator: :class:`str`
The user's discriminator. This is given when the username has conflicts.
bot: :class:`bool`
Specifies if the user is a bot account.
system: :class:`bool`
Specifies if the user is a system user (i.e. represents Discord officially).
"""
__slots__ = ("_stored",)
def __init__(
self, *, state: ConnectionState, data: Union[UserPayload, PartialUserPayload]
) -> None:
super().__init__(state=state, data=data)
self._stored: bool = False
def __repr__(self) -> str:
return f"<User id={self.id} name={self.name!r} discriminator={self.discriminator!r} bot={self.bot}>"
def __del__(self) -> None:
try:
if self._stored:
self._state.deref_user(self.id)
except Exception:
pass
@classmethod
def _copy(cls, user: User):
self = super()._copy(user)
self._stored = False
return self
async def _get_channel(self) -> DMChannel:
ch = await self.create_dm()
return ch
@property
def dm_channel(self) -> Optional[DMChannel]:
"""Optional[:class:`DMChannel`]: Returns the channel associated with this user if it exists.
If this returns ``None``, you can create a DM channel by calling the
:meth:`create_dm` coroutine function.
"""
return self._state._get_private_channel_by_user(self.id)
@property
def mutual_guilds(self) -> List[Guild]:
"""List[:class:`Guild`]: The guilds that the user shares with the client.
.. note::
This will only return mutual guilds within the client's internal cache.
.. versionadded:: 1.7
"""
return [guild for guild in self._state._guilds.values() if guild.get_member(self.id)]
async def create_dm(self) -> DMChannel:
"""|coro|
Creates a :class:`DMChannel` with this user.
This should be rarely called, as this is done transparently for most
people.
Returns
-------
:class:`.DMChannel`
The channel that was created.
"""
found = self.dm_channel
if found is not None:
return found
state = self._state
data: DMChannelPayload = await state.http.start_private_message(self.id)
return state.add_dm_channel(data)
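# Usage sketch for User.create_dm (hypothetical bot code; assumes `user` is a
# User obtained from the cache or an API call):
#
#     channel = await user.create_dm()
#     await channel.send("hello")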
|
the-stack_106_31672 | import collections
import ctypes
import errno
import fcntl
import os
import os.path
import select
import time
class GPIOError(IOError):
"""Base class for GPIO errors."""
pass
class EdgeEvent(collections.namedtuple('EdgeEvent', ['edge', 'timestamp'])):
def __new__(cls, edge, timestamp):
"""EdgeEvent containing the event edge and event time reported by Linux.
Args:
edge (str): event edge, either "rising" or "falling".
timestamp (int): event time in nanoseconds.
"""
return super(EdgeEvent, cls).__new__(cls, edge, timestamp)
class GPIO(object):
def __new__(cls, *args):
if len(args) > 2:
return CdevGPIO.__new__(cls, *args)
else:
return SysfsGPIO.__new__(cls, *args)
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, t, value, traceback):
self.close()
# Methods
def read(self):
"""Read the state of the GPIO.
Returns:
bool: ``True`` for high state, ``False`` for low state.
Raises:
GPIOError: if an I/O or OS error occurs.
"""
raise NotImplementedError()
def write(self, value):
"""Set the state of the GPIO to `value`.
Args:
value (bool): ``True`` for high state, ``False`` for low state.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `value` type is not bool.
"""
raise NotImplementedError()
def poll(self, timeout=None):
"""Poll a GPIO for the edge event configured with the .edge property.
For character device GPIOs, the edge event should be consumed with
`read_event()`. For sysfs GPIOs, the edge event should be consumed with
`read()`.
`timeout` can be a positive number for a timeout in seconds, 0 for a
non-blocking poll, or negative or None for a blocking poll. Defaults to
blocking poll.
Args:
timeout (int, float, None): timeout duration in seconds.
Returns:
bool: ``True`` if an edge event occurred, ``False`` on timeout.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `timeout` type is not None or int.
"""
raise NotImplementedError()
def read_event(self):
"""Read the edge event that occurred with the GPIO.
This method is intended for use with character device GPIOs and is
unsupported by sysfs GPIOs.
Returns:
EdgeEvent: a namedtuple containing the string edge event that
occurred (either ``"rising"`` or ``"falling"``), and the event time
reported by Linux in nanoseconds.
Raises:
GPIOError: if an I/O or OS error occurs.
NotImplementedError: if called on a sysfs GPIO.
"""
raise NotImplementedError()
def close(self):
"""Close the sysfs GPIO.
Raises:
GPIOError: if an I/O or OS error occurs.
"""
raise NotImplementedError()
# Immutable properties
@property
def devpath(self):
"""Get the device path of the underlying GPIO device.
:type: str
"""
raise NotImplementedError()
@property
def fd(self):
"""Get the line file descriptor of the GPIO object.
:type: int
"""
raise NotImplementedError()
@property
def line(self):
"""Get the GPIO object's line number.
:type: int
"""
raise NotImplementedError()
@property
def name(self):
"""Get the line name of the GPIO.
        This method is intended for use with character device GPIOs and always
returns the empty string for sysfs GPIOs.
:type: str
"""
raise NotImplementedError()
@property
def chip_fd(self):
"""Get the GPIO chip file descriptor of the GPIO object.
This method is intended for use with character device GPIOs and is unsupported by sysfs GPIOs.
Raises:
NotImplementedError: if accessed on a sysfs GPIO.
:type: int
"""
raise NotImplementedError()
@property
def chip_name(self):
"""Get the name of the GPIO chip associated with the GPIO.
:type: str
"""
raise NotImplementedError()
@property
def chip_label(self):
""" Get the label of the GPIO chip associated with the GPIO.
:type: str
"""
raise NotImplementedError()
# Mutable properties
def _get_direction(self):
raise NotImplementedError()
def _set_direction(self, direction):
raise NotImplementedError()
direction = property(_get_direction, _set_direction)
"""Get or set the GPIO's direction. Can be "in", "out", "high", "low".
Direction "in" is input; "out" is output, initialized to low; "high" is
output, initialized to high; and "low" is output, initialized to low.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `direction` type is not str.
ValueError: if `direction` value is invalid.
:type: str
"""
def _get_edge(self):
raise NotImplementedError()
def _set_edge(self, edge):
raise NotImplementedError()
edge = property(_get_edge, _set_edge)
"""Get or set the GPIO's interrupt edge. Can be "none", "rising",
"falling", "both".
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `edge` type is not str.
ValueError: if `edge` value is invalid.
:type: str
"""
# String representation
def __str__(self):
"""Get the string representation of the GPIO.
:type: str
"""
raise NotImplementedError()
class _CGpiochipInfo(ctypes.Structure):
_fields_ = [
('name', ctypes.c_char * 32),
('label', ctypes.c_char * 32),
('lines', ctypes.c_uint32),
]
class _CGpiolineInfo(ctypes.Structure):
_fields_ = [
('line_offset', ctypes.c_uint32),
('flags', ctypes.c_uint32),
('name', ctypes.c_char * 32),
('consumer', ctypes.c_char * 32),
]
class _CGpiohandleRequest(ctypes.Structure):
_fields_ = [
('lineoffsets', ctypes.c_uint32 * 64),
('flags', ctypes.c_uint32),
('default_values', ctypes.c_uint8 * 64),
('consumer_label', ctypes.c_char * 32),
('lines', ctypes.c_uint32),
('fd', ctypes.c_int),
]
class _CGpiohandleData(ctypes.Structure):
_fields_ = [
('values', ctypes.c_uint8 * 64),
]
class _CGpioeventRequest(ctypes.Structure):
_fields_ = [
('lineoffset', ctypes.c_uint32),
('handleflags', ctypes.c_uint32),
('eventflags', ctypes.c_uint32),
('consumer_label', ctypes.c_char * 32),
('fd', ctypes.c_int),
]
class _CGpioeventData(ctypes.Structure):
_fields_ = [
('timestamp', ctypes.c_uint64),
('id', ctypes.c_uint32),
]
class CdevGPIO(GPIO):
# Constants scraped from <linux/gpio.h>
_GPIOHANDLE_GET_LINE_VALUES_IOCTL = 0xc040b408
_GPIOHANDLE_SET_LINE_VALUES_IOCTL = 0xc040b409
_GPIO_GET_CHIPINFO_IOCTL = 0x8044b401
_GPIO_GET_LINEINFO_IOCTL = 0xc048b402
_GPIO_GET_LINEHANDLE_IOCTL = 0xc16cb403
_GPIO_GET_LINEEVENT_IOCTL = 0xc030b404
_GPIOHANDLE_REQUEST_INPUT = 0x1
_GPIOHANDLE_REQUEST_OUTPUT = 0x2
_GPIOEVENT_REQUEST_RISING_EDGE = 0x1
_GPIOEVENT_REQUEST_FALLING_EDGE = 0x2
_GPIOEVENT_REQUEST_BOTH_EDGES = 0x3
_GPIOEVENT_EVENT_RISING_EDGE = 0x1
_GPIOEVENT_EVENT_FALLING_EDGE = 0x2
def __init__(self, path, line, direction):
"""**Character device GPIO**
Instantiate a GPIO object and open the character device GPIO with the
specified line and direction at the specified GPIO chip path (e.g.
"/dev/gpiochip0").
`direction` can be "in" for input; "out" for output, initialized to
low; "high" for output, initialized to high; or "low" for output,
initialized to low.
Args:
path (str): GPIO chip character device path.
line (int, str): GPIO line number or name.
direction (str): GPIO direction, can be "in", "out", "high", or
"low".
Returns:
CdevGPIO: GPIO object.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `path`, `line`, or `direction` types are invalid.
ValueError: if `direction` value is invalid.
LookupError: if the GPIO line was not found by the provided name.
"""
self._devpath = None
self._line_fd = None
self._chip_fd = None
self._edge = "none"
self._direction = "in"
self._line = None
self._open(path, line, direction)
def __new__(self, path, line, direction):
return object.__new__(CdevGPIO)
def _open(self, path, line, direction):
if not isinstance(path, str):
raise TypeError("Invalid path type, should be string.")
if not isinstance(line, (int, str)):
raise TypeError("Invalid line type, should be integer or string.")
if not isinstance(direction, str):
raise TypeError("Invalid direction type, should be string.")
if direction.lower() not in ["in", "out", "high", "low"]:
raise ValueError("Invalid direction, can be: \"in\", \"out\", \"high\", \"low\".")
# Open GPIO chip
try:
self._chip_fd = os.open(path, 0)
except OSError as e:
raise GPIOError(e.errno, "Opening GPIO chip: " + e.strerror)
self._devpath = path
if isinstance(line, int):
self._line = line
self._reopen(direction, "none")
else:
self._line = self._find_line_by_name(line)
self._reopen(direction, "none")
def _reopen(self, direction, edge):
# Close existing line
if self._line_fd is not None:
try:
os.close(self._line_fd)
except OSError as e:
raise GPIOError(e.errno, "Closing existing GPIO line: " + e.strerror)
if direction == "in":
if edge == "none":
request = _CGpiohandleRequest()
request.lineoffsets[0] = self._line
request.flags = CdevGPIO._GPIOHANDLE_REQUEST_INPUT
request.consumer_label = b"periphery"
request.lines = 1
try:
fcntl.ioctl(self._chip_fd, CdevGPIO._GPIO_GET_LINEHANDLE_IOCTL, request)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Opening input line handle: " + e.strerror)
self._line_fd = request.fd
self._direction = "in"
self._edge = "none"
else:
request = _CGpioeventRequest()
request.lineoffset = self._line
request.handleflags = CdevGPIO._GPIOHANDLE_REQUEST_INPUT
request.eventflags = CdevGPIO._GPIOEVENT_REQUEST_RISING_EDGE if edge == "rising" else CdevGPIO._GPIOEVENT_REQUEST_FALLING_EDGE if edge == "falling" else CdevGPIO._GPIOEVENT_REQUEST_BOTH_EDGES
request.consumer_label = b"periphery"
try:
fcntl.ioctl(self._chip_fd, CdevGPIO._GPIO_GET_LINEEVENT_IOCTL, request)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Opening input line event handle: " + e.strerror)
self._line_fd = request.fd
self._direction = "in"
self._edge = edge
else:
request = _CGpiohandleRequest()
initial_value = True if direction == "high" else False
request.lineoffsets[0] = self._line
request.flags = CdevGPIO._GPIOHANDLE_REQUEST_OUTPUT
request.default_values[0] = initial_value
request.consumer_label = b"periphery"
request.lines = 1
try:
fcntl.ioctl(self._chip_fd, CdevGPIO._GPIO_GET_LINEHANDLE_IOCTL, request)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Opening output line handle: " + e.strerror)
self._line_fd = request.fd
self._direction = "out"
self._edge = "none"
def _find_line_by_name(self, line):
# Get chip info for number of lines
chip_info = _CGpiochipInfo()
try:
fcntl.ioctl(self._chip_fd, CdevGPIO._GPIO_GET_CHIPINFO_IOCTL, chip_info)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Querying GPIO chip info: " + e.strerror)
# Get each line info
line_info = _CGpiolineInfo()
for i in range(chip_info.lines):
line_info.line_offset = i
try:
fcntl.ioctl(self._chip_fd, CdevGPIO._GPIO_GET_LINEINFO_IOCTL, line_info)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Querying GPIO line info: " + e.strerror)
if line_info.name.decode() == line:
return i
raise LookupError("Opening GPIO line: GPIO line \"{:s}\" not found by name.".format(line))
# Methods
def read(self):
data = _CGpiohandleData()
try:
fcntl.ioctl(self._line_fd, CdevGPIO._GPIOHANDLE_GET_LINE_VALUES_IOCTL, data)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Getting line value: " + e.strerror)
return bool(data.values[0])
def write(self, value):
if not isinstance(value, bool):
raise TypeError("Invalid value type, should be bool.")
data = _CGpiohandleData()
data.values[0] = value
try:
fcntl.ioctl(self._line_fd, CdevGPIO._GPIOHANDLE_SET_LINE_VALUES_IOCTL, data)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Setting line value: " + e.strerror)
def poll(self, timeout=None):
if not isinstance(timeout, (int, float, type(None))):
raise TypeError("Invalid timeout type, should be integer, float, or None.")
# Setup poll
p = select.poll()
p.register(self._line_fd, select.POLLIN | select.POLLPRI | select.POLLERR)
# Scale timeout to milliseconds
if isinstance(timeout, (int, float)) and timeout > 0:
timeout *= 1000
# Poll
events = p.poll(timeout)
return len(events) > 0
def read_event(self):
if self._edge == "none":
raise GPIOError(None, "Invalid operation: GPIO edge not set")
try:
buf = os.read(self._line_fd, ctypes.sizeof(_CGpioeventData))
except OSError as e:
raise GPIOError(e.errno, "Reading GPIO event: " + e.strerror)
event_data = _CGpioeventData.from_buffer_copy(buf)
if event_data.id == CdevGPIO._GPIOEVENT_EVENT_RISING_EDGE:
edge = "rising"
elif event_data.id == CdevGPIO._GPIOEVENT_EVENT_FALLING_EDGE:
edge = "falling"
else:
edge = "none"
timestamp = event_data.timestamp
return EdgeEvent(edge, timestamp)
def close(self):
try:
if self._line_fd is not None:
os.close(self._line_fd)
except OSError as e:
raise GPIOError(e.errno, "Closing GPIO line: " + e.strerror)
try:
if self._chip_fd is not None:
os.close(self._chip_fd)
except OSError as e:
raise GPIOError(e.errno, "Closing GPIO chip: " + e.strerror)
self._line_fd = None
self._chip_fd = None
self._edge = "none"
self._direction = "in"
self._line = None
# Immutable properties
@property
def devpath(self):
return self._devpath
@property
def fd(self):
return self._line_fd
@property
def line(self):
return self._line
@property
def name(self):
line_info = _CGpiolineInfo()
line_info.line_offset = self._line
try:
fcntl.ioctl(self._chip_fd, CdevGPIO._GPIO_GET_LINEINFO_IOCTL, line_info)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Querying GPIO line info: " + e.strerror)
return line_info.name.decode()
@property
def chip_fd(self):
return self._chip_fd
@property
def chip_name(self):
chip_info = _CGpiochipInfo()
try:
fcntl.ioctl(self._chip_fd, CdevGPIO._GPIO_GET_CHIPINFO_IOCTL, chip_info)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Querying GPIO chip info: " + e.strerror)
return chip_info.name.decode()
@property
def chip_label(self):
chip_info = _CGpiochipInfo()
try:
fcntl.ioctl(self._chip_fd, CdevGPIO._GPIO_GET_CHIPINFO_IOCTL, chip_info)
except (OSError, IOError) as e:
raise GPIOError(e.errno, "Querying GPIO chip info: " + e.strerror)
return chip_info.label.decode()
# Mutable properties
def _get_direction(self):
return self._direction
def _set_direction(self, direction):
if not isinstance(direction, str):
raise TypeError("Invalid direction type, should be string.")
if direction.lower() not in ["in", "out", "high", "low"]:
raise ValueError("Invalid direction, can be: \"in\", \"out\", \"high\", \"low\".")
if self._direction == direction:
return
self._reopen(direction, "none")
direction = property(_get_direction, _set_direction)
def _get_edge(self):
return self._edge
def _set_edge(self, edge):
if not isinstance(edge, str):
raise TypeError("Invalid edge type, should be string.")
if edge.lower() not in ["none", "rising", "falling", "both"]:
raise ValueError("Invalid edge, can be: \"none\", \"rising\", \"falling\", \"both\".")
if self._direction != "in":
raise GPIOError(None, "Invalid operation: cannot set edge on output GPIO")
if self._edge == edge:
return
self._reopen("in", edge)
edge = property(_get_edge, _set_edge)
# String representation
def __str__(self):
try:
str_name = self.name
except GPIOError:
str_name = "<error>"
try:
str_direction = self.direction
except GPIOError:
str_direction = "<error>"
try:
str_edge = self.edge
except GPIOError:
str_edge = "<error>"
try:
str_chip_name = self.chip_name
except GPIOError:
str_chip_name = "<error>"
try:
str_chip_label = self.chip_label
except GPIOError:
str_chip_label = "<error>"
return "GPIO {:d} (name=\"{:s}\", device={:s}, line_fd={:d}, chip_fd={:d}, direction={:s}, edge={:s}, chip_name=\"{:s}\", chip_label=\"{:s}\", type=cdev)" \
.format(self._line, str_name, self._devpath, self._line_fd, self._chip_fd, str_direction, str_edge, str_chip_name, str_chip_label)
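# Usage sketch (hypothetical wiring; assumes /dev/gpiochip0 exposes lines 17
# and 27 and the process has permission to open it):
#
#     led = GPIO("/dev/gpiochip0", 17, "out")
#     led.write(True)
#     led.close()
#
#     button = GPIO("/dev/gpiochip0", 27, "in")
#     button.edge = "falling"
#     if button.poll(timeout=5):
#         print(button.read_event())
#     button.close()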
class SysfsGPIO(GPIO):
# Number of retries to check for GPIO export or direction write on open
GPIO_OPEN_RETRIES = 10
# Delay between check for GPIO export or direction write on open (100ms)
GPIO_OPEN_DELAY = 0.1
def __init__(self, line, direction):
"""**Sysfs GPIO**
Instantiate a GPIO object and open the sysfs GPIO with the specified
line and direction.
`direction` can be "in" for input; "out" for output, initialized to
low; "high" for output, initialized to high; or "low" for output,
initialized to low.
Args:
line (int): GPIO line number.
direction (str): GPIO direction, can be "in", "out", "high", or
"low",
Returns:
SysfsGPIO: GPIO object.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `line` or `direction` types are invalid.
ValueError: if `direction` value is invalid.
TimeoutError: if waiting for GPIO export times out.
"""
self._fd = None
self._line = None
self._open(line, direction)
def __new__(self, line, direction):
return object.__new__(SysfsGPIO)
def _open(self, line, direction):
if not isinstance(line, int):
raise TypeError("Invalid line type, should be integer.")
if not isinstance(direction, str):
raise TypeError("Invalid direction type, should be string.")
if direction.lower() not in ["in", "out", "high", "low"]:
raise ValueError("Invalid direction, can be: \"in\", \"out\", \"high\", \"low\".")
gpio_path = "/sys/class/gpio/gpio{:d}".format(line)
if not os.path.isdir(gpio_path):
# Export the line
try:
with open("/sys/class/gpio/export", "w") as f_export:
f_export.write("{:d}\n".format(line))
except IOError as e:
raise GPIOError(e.errno, "Exporting GPIO: " + e.strerror)
# Loop until GPIO is exported
exported = False
for i in range(SysfsGPIO.GPIO_OPEN_RETRIES):
if os.path.isdir(gpio_path):
exported = True
break
time.sleep(SysfsGPIO.GPIO_OPEN_DELAY)
if not exported:
raise TimeoutError("Exporting GPIO: waiting for \"{:s}\" timed out".format(gpio_path))
# Write direction, looping in case of EACCES errors due to delayed udev
# permission rule application after export
for i in range(SysfsGPIO.GPIO_OPEN_RETRIES):
try:
with open(os.path.join(gpio_path, "direction"), "w") as f_direction:
f_direction.write(direction.lower() + "\n")
break
except IOError as e:
if e.errno != errno.EACCES or (e.errno == errno.EACCES and i == SysfsGPIO.GPIO_OPEN_RETRIES - 1):
raise GPIOError(e.errno, "Setting GPIO direction: " + e.strerror)
time.sleep(SysfsGPIO.GPIO_OPEN_DELAY)
else:
# Write direction
try:
with open(os.path.join(gpio_path, "direction"), "w") as f_direction:
f_direction.write(direction.lower() + "\n")
except IOError as e:
raise GPIOError(e.errno, "Setting GPIO direction: " + e.strerror)
# Open value
try:
self._fd = os.open(os.path.join(gpio_path, "value"), os.O_RDWR)
except OSError as e:
raise GPIOError(e.errno, "Opening GPIO: " + e.strerror)
self._line = line
self._path = gpio_path
# Methods
def read(self):
# Read value
try:
buf = os.read(self._fd, 2)
except OSError as e:
raise GPIOError(e.errno, "Reading GPIO: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)
if buf[0] == b"0"[0]:
return False
elif buf[0] == b"1"[0]:
return True
raise GPIOError(None, "Unknown GPIO value: {}".format(buf))
def write(self, value):
if not isinstance(value, bool):
raise TypeError("Invalid value type, should be bool.")
# Write value
try:
if value:
os.write(self._fd, b"1\n")
else:
os.write(self._fd, b"0\n")
except OSError as e:
raise GPIOError(e.errno, "Writing GPIO: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)
def poll(self, timeout=None):
if not isinstance(timeout, (int, float, type(None))):
raise TypeError("Invalid timeout type, should be integer, float, or None.")
# Setup poll
p = select.poll()
p.register(self._fd, select.POLLPRI | select.POLLERR)
# Scale timeout to milliseconds
if isinstance(timeout, (int, float)) and timeout > 0:
timeout *= 1000
# Poll
events = p.poll(timeout)
# If GPIO edge interrupt occurred
if events:
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)
return True
return False
def read_event(self):
raise NotImplementedError()
def close(self):
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise GPIOError(e.errno, "Closing GPIO: " + e.strerror)
self._fd = None
# Unexport the line
try:
unexport_fd = os.open("/sys/class/gpio/unexport", os.O_WRONLY)
os.write(unexport_fd, "{:d}\n".format(self._line).encode())
os.close(unexport_fd)
except OSError as e:
raise GPIOError(e.errno, "Unexporting GPIO: " + e.strerror)
# Immutable properties
@property
def devpath(self):
return self._path
@property
def fd(self):
return self._fd
@property
def line(self):
return self._line
@property
def name(self):
return ""
@property
def chip_fd(self):
raise NotImplementedError("Sysfs GPIO does not have a gpiochip file descriptor.")
@property
def chip_name(self):
gpio_path = os.path.join(self._path, "device")
gpiochip_path = os.readlink(gpio_path)
if '/' not in gpiochip_path:
raise GPIOError(None, "Reading gpiochip name: invalid device symlink \"{:s}\"".format(gpiochip_path))
return gpiochip_path.split('/')[-1]
@property
def chip_label(self):
gpio_path = "/sys/class/gpio/{:s}/label".format(self.chip_name)
try:
with open(gpio_path, "r") as f_label:
label = f_label.read()
except (GPIOError, IOError) as e:
if isinstance(e, IOError):
raise GPIOError(e.errno, "Reading gpiochip label: " + e.strerror)
raise GPIOError(None, "Reading gpiochip label: " + e.strerror)
return label.strip()
# Mutable properties
def _get_direction(self):
# Read direction
try:
with open(os.path.join(self._path, "direction"), "r") as f_direction:
direction = f_direction.read()
except IOError as e:
raise GPIOError(e.errno, "Getting GPIO direction: " + e.strerror)
return direction.strip()
def _set_direction(self, direction):
if not isinstance(direction, str):
raise TypeError("Invalid direction type, should be string.")
if direction.lower() not in ["in", "out", "high", "low"]:
raise ValueError("Invalid direction, can be: \"in\", \"out\", \"high\", \"low\".")
# Write direction
try:
with open(os.path.join(self._path, "direction"), "w") as f_direction:
f_direction.write(direction.lower() + "\n")
except IOError as e:
raise GPIOError(e.errno, "Setting GPIO direction: " + e.strerror)
direction = property(_get_direction, _set_direction)
def _get_edge(self):
# Read edge
try:
with open(os.path.join(self._path, "edge"), "r") as f_edge:
edge = f_edge.read()
except IOError as e:
raise GPIOError(e.errno, "Getting GPIO edge: " + e.strerror)
return edge.strip()
def _set_edge(self, edge):
if not isinstance(edge, str):
raise TypeError("Invalid edge type, should be string.")
if edge.lower() not in ["none", "rising", "falling", "both"]:
raise ValueError("Invalid edge, can be: \"none\", \"rising\", \"falling\", \"both\".")
# Write edge
try:
with open(os.path.join(self._path, "edge"), "w") as f_edge:
f_edge.write(edge.lower() + "\n")
except IOError as e:
raise GPIOError(e.errno, "Setting GPIO edge: " + e.strerror)
edge = property(_get_edge, _set_edge)
# String representation
def __str__(self):
try:
str_direction = self.direction
except GPIOError:
str_direction = "<error>"
try:
str_edge = self.edge
except GPIOError:
str_edge = "<error>"
try:
str_chip_name = self.chip_name
except GPIOError:
str_chip_name = "<error>"
try:
str_chip_label = self.chip_label
except GPIOError:
str_chip_label = "<error>"
return "GPIO {:d} (device={:s}, fd={:d}, direction={:s}, edge={:s}, chip_name=\"{:s}\", chip_label=\"{:s}\", type=sysfs)" \
.format(self._line, self._path, self._fd, str_direction, str_edge, str_chip_name, str_chip_label)
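# Sysfs variant sketch (hypothetical line number; requires permission to write
# /sys/class/gpio/export):
#
#     gpio = GPIO(17, "out")   # the two-argument form dispatches to SysfsGPIO
#     gpio.write(True)
#     gpio.close()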
|
the-stack_106_31674 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to build and test (verify) reviews that are posted
to ReviewBoard. The script is intended for use by automated "ReviewBots"
that are run on ASF infrastructure (or by anyone that wishes to donate
some compute power). For example, see 'support/jenkins/reviewbot.sh'.
The script performs the following sequence:
* A query grabs review IDs from Reviewboard.
* In reverse order (most recent first), the script determines if the
review needs verification (if the review has been updated or changed
since the last run through this script).
* For each review that needs verification:
* The review is applied (via 'support/apply-reviews.py').
* Mesos is built and unit tests are run.
* The result is posted to ReviewBoard.
"""
import argparse
import atexit
import json
import os
import platform
import subprocess
import sys
import urllib.error
import urllib.parse
import urllib.request
from datetime import datetime
from common import REVIEWBOARD_URL
REVIEW_SIZE = 1000000 # 1 MB in bytes.
# Parse arguments.
parser = argparse.ArgumentParser(
description="Reviews that need verification from the Review Board")
parser.add_argument("-u", "--user", type=str, required=True,
help="Review Board user name")
parser.add_argument("-p", "--password", type=str, required=True,
help="Review Board user password")
parser.add_argument("-r", "--reviews", type=int, required=False,
default=-1, help="The number of reviews to fetch,"
" that will need verification")
# Unless otherwise specified consider pending review requests to Mesos updated
# since 03/01/2014.
group = "mesos"
last_updated = "2014-03-01T00:00:00"
query_parameters = "?to-groups=%s&status=pending&last-updated-from=%s" \
% (group, last_updated)
parser.add_argument("-q", "--query", type=str, required=False,
help="Query parameters", default=query_parameters)
parser.add_argument("-o", "--out-file", type=str, required=False,
help="The out file with the reviews IDs that"
" need verification")
parser.add_argument("--skip-verify", action='store_true', required=False,
help="Skip the verification and just write the review"
" ids that need verification")
parameters = parser.parse_args()
USER = parameters.user
PASSWORD = parameters.password
NUM_REVIEWS = parameters.reviews
QUERY_PARAMS = parameters.query
OUT_FILE = parameters.out_file
SKIP_VERIFY = parameters.skip_verify
class ReviewError(Exception):
"""Exception returned by post_review()."""
pass
def shell(command):
"""Run a shell command."""
print(command)
out = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
return out.decode(sys.stdout.encoding)
def api(url, data=None):
"""Call the ReviewBoard API."""
try:
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(
realm="Web API",
uri="reviews.apache.org",
user=USER,
passwd=PASSWORD)
opener = urllib.request.build_opener(auth_handler)
urllib.request.install_opener(opener)
if isinstance(data, str):
data = str.encode(data)
return json.loads(urllib.request.urlopen(url, data=data).read())
except urllib.error.HTTPError as err:
print("Error handling URL %s: %s (%s)" % (url, err.reason, err.read()))
exit(1)
except urllib.error.URLError as err:
print("Error handling URL %s: %s" % (url, err.reason))
exit(1)
def apply_review(review_id):
"""Apply a review using the script apply-reviews.py."""
print("Applying review %s" % review_id)
shell("%s support/apply-reviews.py -n -r %s" % (sys.executable, review_id))
def apply_reviews(review_request, reviews):
"""Apply multiple reviews at once."""
# If there are no reviewers specified throw an error.
if not review_request["target_people"]:
raise ReviewError("No reviewers specified. Please find a reviewer by"
" asking on JIRA or the mailing list.")
    # If there is a circular dependency throw an error.
if review_request["id"] in reviews:
raise ReviewError("Circular dependency detected for review %s."
"Please fix the 'depends_on' field."
% review_request["id"])
else:
reviews.append(review_request["id"])
# First recursively apply the dependent reviews.
for review in review_request["depends_on"]:
review_url = review["href"]
print("Dependent review: %s " % review_url)
apply_reviews(api(review_url)["review_request"], reviews)
# Now apply this review if not yet submitted.
if review_request["status"] != "submitted":
apply_review(review_request["id"])
def post_review(review_request, message):
"""Post a review on the review board."""
print("Posting review: %s" % message)
review_url = review_request["links"]["reviews"]["href"]
data = urllib.parse.urlencode({'body_top': message, 'public': 'true'})
api(review_url, data)
@atexit.register
def cleanup():
"""Clean the git repository."""
try:
shell("git clean -fd")
HEAD = shell("git rev-parse HEAD")
print(HEAD)
shell("git checkout HEAD -- %s" % HEAD)
except subprocess.CalledProcessError as err:
print("Failed command: %s\n\nError: %s" % (err.cmd, err.output))
def verify_review(review_request):
"""Verify a review."""
print("Verifying review %s" % review_request["id"])
build_output = "build_" + str(review_request["id"])
try:
# Recursively apply the review and its dependents.
reviews = []
apply_reviews(review_request, reviews)
reviews.reverse() # Reviews are applied in the reverse order.
command = ""
if platform.system() == 'Windows':
command = "support\\windows-build.bat"
# There is no equivalent to `tee` on Windows.
subprocess.check_call(
['cmd', '/c', '%s 2>&1 > %s' % (command, build_output)])
else:
# Launch docker build script.
# TODO(jojy): Launch 'docker_build.sh' in subprocess so that
# verifications can be run in parallel for various configurations.
configuration = ("export "
"OS='ubuntu:14.04' "
"BUILDTOOL='autotools' "
"COMPILER='gcc' "
"CONFIGURATION='--verbose "
"--disable-libtool-wrappers' "
"ENVIRONMENT='GLOG_v=1 MESOS_VERBOSE=1'")
command = "%s; ./support/docker-build.sh" % configuration
# `tee` the output so that the console can log the whole build
# output. `pipefail` ensures that the exit status of the build
            # command is preserved even after tee'ing.
subprocess.check_call(['bash', '-c',
('set -o pipefail; %s 2>&1 | tee %s')
% (command, build_output)])
# Success!
post_review(
review_request,
"Patch looks great!\n\n" \
"Reviews applied: %s\n\n" \
"Passed command: %s" % (reviews, command))
except subprocess.CalledProcessError as err:
# If we are here because the docker build command failed, read the
# output from `build_output` file. For all other command failures read
# the output from `e.output`.
if os.path.exists(build_output):
output = open(build_output).read()
else:
output = err.output.decode(sys.stdout.encoding)
if platform.system() == 'Windows':
# We didn't output anything during the build (because `tee`
# doesn't exist), so we print the output to stdout upon error.
sys.stdout.buffer.write(output.encode())
# Truncate the output when posting the review as it can be very large.
if len(output) > REVIEW_SIZE:
output = "...<truncated>...\n" + output[-REVIEW_SIZE:]
output += "\nFull log: "
output += urllib.parse.urljoin(os.environ['BUILD_URL'], 'console')
post_review(
review_request,
"Bad patch!\n\n" \
"Reviews applied: %s\n\n" \
"Failed command: %s\n\n" \
"Error:\n%s" % (reviews, err.cmd, output))
except ReviewError as err:
post_review(
review_request,
"Bad review!\n\n" \
"Reviews applied: %s\n\n" \
"Error:\n%s" % (reviews, err.args[0]))
# Clean up.
cleanup()
def needs_verification(review_request):
"""Return True if this review request needs to be verified."""
print("Checking if review: %s needs verification" % review_request["id"])
# Skip if the review blocks another review.
if review_request["blocks"]:
print("Skipping blocking review %s" % review_request["id"])
return False
diffs_url = review_request["links"]["diffs"]["href"]
diffs = api(diffs_url)
if "diffs" not in diffs: # No diffs attached!
print("Skipping review %s as it has no diffs" % review_request["id"])
return False
# Get the timestamp of the latest diff.
timestamp = diffs["diffs"][-1]["timestamp"]
rb_date_format = "%Y-%m-%dT%H:%M:%SZ"
diff_time = datetime.strptime(timestamp, rb_date_format)
print("Latest diff timestamp: %s" % diff_time)
# Get the timestamp of the latest review from this script.
reviews_url = review_request["links"]["reviews"]["href"]
reviews = api(reviews_url + "?max-results=200")
review_time = None
for review in reversed(reviews["reviews"]):
if review["links"]["user"]["title"] == USER:
timestamp = review["timestamp"]
review_time = datetime.strptime(timestamp, rb_date_format)
print("Latest review timestamp: %s" % review_time)
break
# TODO: Apply this check recursively up the dependency chain.
changes_url = review_request["links"]["changes"]["href"]
changes = api(changes_url)
dependency_time = None
for change in changes["changes"]:
if "depends_on" in change["fields_changed"]:
timestamp = change["timestamp"]
dependency_time = datetime.strptime(timestamp, rb_date_format)
print("Latest dependency change timestamp: %s" % dependency_time)
break
# Needs verification if there is a new diff, or if the dependencies changed,
# after the last time it was verified.
return not review_time or review_time < diff_time or \
(dependency_time and review_time < dependency_time)
def write_review_ids(review_ids):
"""Write the IDs of the review requests that need verification."""
print("%s review requests need verification" % len(review_ids))
if OUT_FILE is not None:
with open(OUT_FILE, 'w') as f:
f.write('\n'.join(review_ids))
else:
print('\n'.join(review_ids))
def main():
"""Main function to verify the submitted reviews."""
review_requests_url = \
"%s/api/review-requests/%s" % (REVIEWBOARD_URL, QUERY_PARAMS)
review_requests = api(review_requests_url)
review_ids = []
for review_request in reversed(review_requests["review_requests"]):
if (NUM_REVIEWS == -1 or len(review_ids) < NUM_REVIEWS) and \
needs_verification(review_request):
if not SKIP_VERIFY:
verify_review(review_request)
review_ids.append(str(review_request["id"]))
write_review_ids(review_ids)
if __name__ == '__main__':
main()
|
the-stack_106_31675 | import torch.nn as nn
import torch
from .model import Model
from IPython import embed
import pdb
class RugE(Model):
"""`Knowledge Graph Embedding with Iterative Guidance from Soft Rules`_ (RugE), which is a novel paradigm of KG embedding with iterative guidance from soft rules.
Attributes:
args: Model configuration parameters.
epsilon: Caculate embedding_range.
margin: Caculate embedding_range and loss.
embedding_range: Uniform distribution range.
ent_emb: Entity embedding, shape:[num_ent, emb_dim].
rel_emb: Relation_embedding, shape:[num_rel, emb_dim].
.. _Knowledge Graph Embedding with Iterative Guidance from Soft Rules: https://ojs.aaai.org/index.php/AAAI/article/view/11918
"""
def __init__(self, args):
super(RugE, self).__init__(args)
self.args = args
self.ent_emb = None
self.rel_emb = None
self.init_emb()
def init_emb(self):
"""Initialize the entity and relation embeddings in the form of a uniform distribution.
"""
self.epsilon = 2.0
self.margin = nn.Parameter(
torch.Tensor([self.args.margin]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.margin.item() + self.epsilon) / self.args.emb_dim]),
requires_grad=False
)
self.ent_emb = nn.Embedding(self.args.num_ent, self.args.emb_dim * 2)
self.rel_emb = nn.Embedding(self.args.num_rel, self.args.emb_dim * 2)
nn.init.uniform_(tensor=self.ent_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
nn.init.uniform_(tensor=self.rel_emb.weight.data, a=-self.embedding_range.item(), b=self.embedding_range.item())
def score_func(self, head_emb, relation_emb, tail_emb, mode):
"""Calculating the score of triples.
        The formula for calculating the score is :math:`Re(\langle w_r, e_s, \bar{e}_o \rangle)`
Args:
head_emb: The head entity embedding.
relation_emb: The relation embedding.
tail_emb: The tail entity embedding.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
re_head, im_head = torch.chunk(head_emb, 2, dim=-1)
re_relation, im_relation = torch.chunk(relation_emb, 2, dim=-1)
re_tail, im_tail = torch.chunk(tail_emb, 2, dim=-1)
return torch.sum(
re_head * re_tail * re_relation
+ im_head * im_tail * re_relation
+ re_head * im_tail * im_relation
- im_head * re_tail * im_relation,
-1
)
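    # Illustrative note (not part of the model API): the sum above is the
    # ComplEx-style trilinear score Re(<w_r, e_s, conj(e_o)>) evaluated on the
    # real/imaginary halves of each 2*emb_dim embedding. For a single triple with
    # split halves (re_h, im_h), (re_r, im_r), (re_t, im_t) it is equivalent to:
    #
    #   ((re_h * re_t + im_h * im_t) * re_r
    #    + (re_h * im_t - im_h * re_t) * im_r).sum()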
def forward(self, triples, negs=None, mode='single'):
"""The functions used in the training phase
Args:
triples: The triples ids, as (h, r, t), shape:[batch_size, 3].
negs: Negative samples, defaults to None.
mode: Choose head-predict or tail-predict, Defaults to 'single'.
Returns:
score: The score of triples.
"""
head_emb, relation_emb, tail_emb = self.tri2emb(triples, negs, mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score
def get_score(self, batch, mode):
"""The functions used in the testing phase
Args:
batch: A batch of data.
mode: Choose head-predict or tail-predict.
Returns:
score: The score of triples.
"""
triples = batch["positive_sample"]
head_emb, relation_emb, tail_emb = self.tri2emb(triples, mode=mode)
score = self.score_func(head_emb, relation_emb, tail_emb, mode)
return score |
the-stack_106_31678 | from math import ceil
from statistics import mean
def convert_opinion(agent_list): # noqa: C901
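    # Descriptive note (added for readability; the interpretation is an assumption
    # drawn from the code below): agents are split by rank into a top 30%, a middle
    # band and a bottom band. For each agent, a neighbourhood average of score and
    # opinion is taken over roughly 30% of the agents adjacent in rank (below for the
    # top band, above for the bottom band, both sides for the middle band). D is the
    # gap between an agent's score and that average, F >= 2 grows with D, and the new
    # opinion moves the current opinion a fraction 1/F of the way toward the
    # neighbourhood opinion average.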
num_agents = len(agent_list)
n_comp = ceil(num_agents * 0.3)
O_ave = [0] * num_agents
S_ave = [0] * num_agents
D = [0] * num_agents
F = [0] * num_agents
def get(lis, a):
return lis[a.id]
def get_threshold():
high = int(len(agent_list) * 0.3)
low = int(len(agent_list) * 0.7)
return (high, low)
def get_diff():
min_opinion = min(a.opinion_od for a in agent_list)
max_opinion = max(a.opinion_od for a in agent_list)
return abs(max_opinion - min_opinion)
def high_leader(i, attr):
i += 1
return mean([getattr(a, attr) for a in agent_list[i : i + n_comp]])
def mid_leader(i, attr):
half_comp = ceil(n_comp / 2)
h = i - 1
higher = [getattr(a, attr) for a in agent_list[h : h - half_comp : -1]]
l = i + 1
lower = [getattr(a, attr) for a in agent_list[l : l + half_comp]]
return mean(higher + lower)
def low_leader(i, attr):
i -= 1
return mean([getattr(a, attr) for a in agent_list[i : i - n_comp : -1]])
def calc_f(a, diff_max):
return 2 + (2 * get(D, a) / diff_max)
def calc_new_O(a):
        return a.opinion_od - (a.opinion_od - a.O_ave) / get(F, a)
def calc_for_thresholds(gattr, sattr):
for i, a in enumerate(agent_list):
if a.rank < high_thresh:
setattr(a, sattr, high_leader(i, gattr))
elif low_thresh >= a.rank >= high_thresh:
setattr(a, sattr, mid_leader(i, gattr))
else:
setattr(a, sattr, low_leader(i, gattr))
diff_max = get_diff()
high_thresh, low_thresh = get_threshold()
calc_for_thresholds("score", "S_ave")
for a in agent_list:
        D[a.id] = abs(a.score - a.S_ave)
F[a.id] = calc_f(a, diff_max)
calc_for_thresholds("opinion_od", "O_ave")
for a in agent_list:
a.opinion_new = calc_new_O(a)
return
|
the-stack_106_31679 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import time
import pytest
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.transforms.py_transforms as py_transforms
import mindspore.dataset.vision.c_transforms as CV
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore import context, nn
from mindspore.common import dtype as mstype, set_seed
from mindspore.dataset.vision import Inter
from mindspore.train import Model
def create_model():
"""
Define and return a simple model
"""
class Net(nn.Cell):
def construct(self, x, y):
return x
net = Net()
model_simple = Model(net)
return model_simple
def create_dataset(data_path, batch_size=32, num_parallel_workers=1):
"""
Create dataset for train or test
"""
# Define dataset
mnist_ds = ds.MnistDataset(data_path)
resize_height, resize_width = 32, 32
rescale = 1.0 / 255.0
shift = 0.0
rescale_nml = 1 / 0.3081
shift_nml = -1 * 0.1307 / 0.3081
# Define map operations
resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)
rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
rescale_op = CV.Rescale(rescale, shift)
hwc2chw_op = CV.HWC2CHW()
type_cast_op = C.TypeCast(mstype.int32)
# Apply map operations on images
mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=rescale_op, input_columns="image", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers)
mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
return mnist_ds
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.forked
def test_autotune_train_simple_model(tmp_path):
"""
Feature: Dataset AutoTune
Description: Test Dataset AutoTune for training of a simple model and deserialize the written at config file
Expectation: Training and data deserialization completes successfully
"""
original_seed = ds.config.get_seed()
set_seed(1)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
context.set_context(enable_graph_kernel=True)
at_config_filename = "test_autotune_train_simple_model_at_config.json"
# Enable Dataset AutoTune
original_autotune = ds.config.get_enable_autotune()
ds.config.set_enable_autotune(True, str(tmp_path) + at_config_filename)
ds_train = create_dataset(os.path.join("/home/workspace/mindspore_dataset/mnist", "train"), 32)
model = create_model()
print("Start training.")
epoch_size = 10
start_time = time.time()
model.train(epoch_size, ds_train)
print("Training finished. Took {}s".format(time.time() - start_time))
ds.config.set_enable_autotune(False)
ds_train_deserialized = ds.deserialize(json_filepath=str(tmp_path) + at_config_filename)
num = 0
for data1, data2 in zip(ds_train.create_dict_iterator(num_epochs=1, output_numpy=True),
ds_train_deserialized.create_dict_iterator(num_epochs=1, output_numpy=True)):
np.testing.assert_array_equal(data1['image'], data2['image'])
np.testing.assert_array_equal(data1['label'], data2['label'])
num += 1
assert num == 1875
# Restore settings
ds.config.set_enable_autotune(original_autotune)
ds.config.set_seed(original_seed)
def create_dataset_pyfunc_multiproc(data_path, batch_size=32, num_map_parallel_workers=1, max_rowsize=16):
"""
Create dataset with Python ops list and python_multiprocessing=True for Map op
"""
# Define dataset
data1 = ds.MnistDataset(data_path, num_parallel_workers=8)
data1 = data1.map(operations=[py_vision.ToType(np.int32)], input_columns="label",
num_parallel_workers=num_map_parallel_workers,
python_multiprocessing=True, max_rowsize=max_rowsize)
# Setup transforms list which include Python ops
transforms_list = [
py_vision.ToTensor(),
lambda x: x,
py_vision.HWC2CHW(),
py_vision.RandomErasing(0.9, value='random'),
py_vision.Cutout(4, 2),
lambda y: y
]
compose_op = py_transforms.Compose(transforms_list)
data1 = data1.map(operations=compose_op, input_columns="image", num_parallel_workers=num_map_parallel_workers,
python_multiprocessing=True, max_rowsize=max_rowsize)
# Apply Dataset Ops
data1 = data1.batch(batch_size, drop_remainder=True)
return data1
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.forked
def test_autotune_pymultiproc_train_simple_model():
"""
Feature: Dataset AutoTune
Description: Test Dataset AutoTune with Python Multiprocessing for Training of a Simple Model
Expectation: Training completes successfully
"""
original_seed = ds.config.get_seed()
set_seed(20)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
context.set_context(enable_graph_kernel=True)
# Reduce memory required by disabling the shared memory optimization
mem_original = ds.config.get_enable_shared_mem()
ds.config.set_enable_shared_mem(False)
# Enable Dataset AutoTune
original_autotune = ds.config.get_enable_autotune()
ds.config.set_enable_autotune(True)
original_interval = ds.config.get_autotune_interval()
ds.config.set_autotune_interval(100)
ds_train = create_dataset_pyfunc_multiproc(os.path.join("/home/workspace/mindspore_dataset/mnist", "train"), 32, 2)
model = create_model()
print("Start Model Training.")
model_start = time.time()
epoch_size = 2
model.train(epoch_size, ds_train)
print("Model training is finished. Took {}s".format(time.time() - model_start))
# Restore settings
ds.config.set_autotune_interval(original_interval)
ds.config.set_enable_autotune(original_autotune)
ds.config.set_enable_shared_mem(mem_original)
ds.config.set_seed(original_seed)
if __name__ == "__main__":
test_autotune_train_simple_model("")
test_autotune_pymultiproc_train_simple_model()
|
the-stack_106_31681 | # -*- coding: utf-8 -*-
"""
Test Generic Map
"""
import os
import tempfile
import warnings
import numpy as np
import pytest
import matplotlib.pyplot as plt
import astropy.wcs
import astropy.units as u
from astropy.io import fits
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy.tests.helper import assert_quantity_allclose
from astropy.visualization import wcsaxes
import sunpy
import sunpy.map
import sunpy.sun
import sunpy.data.test
import sunpy.coordinates
from sunpy.time import parse_time
from sunpy.util import SunpyUserWarning
testpath = sunpy.data.test.rootdir
@pytest.fixture
def hmi_test_map():
return sunpy.map.Map(os.path.join(testpath, "resampled_hmi.fits"))
@pytest.fixture
def aia171_test_map():
return sunpy.map.Map(os.path.join(testpath, 'aia_171_level1.fits'))
@pytest.fixture
def heliographic_test_map():
return sunpy.map.Map(os.path.join(testpath, 'heliographic_phase_map.fits.gz'))
@pytest.fixture
def aia171_test_map_with_mask(aia171_test_map):
shape = aia171_test_map.data.shape
mask = np.zeros_like(aia171_test_map.data, dtype=bool)
mask[0:shape[0] // 2, 0:shape[1] // 2] = True
return sunpy.map.Map(np.ma.array(aia171_test_map.data, mask=mask), aia171_test_map.meta)
@pytest.fixture
def generic_map():
data = np.ones([6, 6], dtype=np.float64)
header = {
'CRVAL1': 0,
'CRVAL2': 0,
'CRPIX1': 5,
'CRPIX2': 5,
'CDELT1': 10,
'CDELT2': 10,
'CUNIT1': 'arcsec',
'CUNIT2': 'arcsec',
'PC1_1': 0,
'PC1_2': -1,
'PC2_1': 1,
'PC2_2': 0,
'NAXIS1': 6,
'NAXIS2': 6,
'date-obs': '1970/01/01T00:00:00',
'obsrvtry': 'Foo',
'detector': 'bar',
'wavelnth': 10,
'waveunit': 'm'
}
return sunpy.map.Map((data, header))
def test_fits_data_comparison(aia171_test_map):
"""Make sure the data is the same in pyfits and SunPy"""
data = fits.open(os.path.join(testpath, 'aia_171_level1.fits'))[0].data
np.testing.assert_allclose(aia171_test_map.data, data)
def test_get_item(generic_map):
with pytest.raises(NotImplementedError):
generic_map[10, 10]
def test_wcs(aia171_test_map):
wcs = aia171_test_map.wcs
assert isinstance(wcs, astropy.wcs.WCS)
assert all(wcs.wcs.crpix ==
[aia171_test_map.reference_pixel.x.value, aia171_test_map.reference_pixel.y.value])
assert all(wcs.wcs.cdelt == [aia171_test_map.scale.axis1.value,
aia171_test_map.scale.axis1.value])
assert all(
wcs.wcs.crval ==
[aia171_test_map._reference_longitude.value, aia171_test_map._reference_latitude.value])
assert set(wcs.wcs.ctype) == set(
[aia171_test_map.coordinate_system.axis1, aia171_test_map.coordinate_system.axis2])
np.testing.assert_allclose(wcs.wcs.pc, aia171_test_map.rotation_matrix)
assert set(wcs.wcs.cunit) == set([u.Unit(a) for a in aia171_test_map.spatial_units])
def test_dtype(generic_map):
assert generic_map.dtype == np.float64
def test_size(generic_map):
assert generic_map.size == 36 * u.pix
def test_min(generic_map):
assert generic_map.min() == 1
def test_max(generic_map):
assert generic_map.max() == 1
def test_mean(generic_map):
assert generic_map.mean() == 1
def test_std(generic_map):
assert generic_map.std() == 0
# ==============================================================================
# Test the default value of a load of properties
# TODO: Test the header keyword extraction
# ==============================================================================
def test_name(generic_map):
assert isinstance(generic_map.name, str)
def test_nickname(generic_map):
assert generic_map.nickname == 'bar'
def test_nickname_set(generic_map):
assert generic_map.nickname == 'bar'
generic_map.nickname = 'hi'
assert generic_map.nickname == 'hi'
def test_date(generic_map):
assert isinstance(generic_map.date, Time)
def test_date_aia(aia171_test_map):
assert aia171_test_map.date == parse_time('2011-02-15T00:00:00.34')
def test_detector(generic_map):
assert generic_map.detector == 'bar'
def test_dsun(generic_map):
with pytest.warns(SunpyUserWarning, match='Missing metadata for Sun-spacecraft separation'):
assert generic_map.dsun == sunpy.coordinates.get_sunearth_distance(generic_map.date).to(u.m)
def test_rsun_meters(generic_map):
assert generic_map.rsun_meters == sunpy.sun.constants.radius
def test_rsun_obs(generic_map):
with pytest.warns(SunpyUserWarning, match='Missing metadata for solar radius'):
assert generic_map.rsun_obs == sunpy.sun.solar_semidiameter_angular_size(generic_map.date)
def test_coordinate_system(generic_map):
assert generic_map.coordinate_system == ('HPLN- ', 'HPLT- ')
def test_carrington_longitude(generic_map):
with pytest.warns(SunpyUserWarning, match='Missing metadata for Carrington longitude'):
assert generic_map.carrington_longitude == sunpy.coordinates.get_sun_L0(generic_map.date)
def test_heliographic_latitude(generic_map):
with pytest.warns(SunpyUserWarning, match='Missing metadata for heliographic latitude'):
assert generic_map.heliographic_latitude == sunpy.coordinates.get_sun_B0(generic_map.date)
def test_heliographic_longitude(generic_map):
with pytest.warns(SunpyUserWarning, match='Missing metadata for heliographic longitude'):
assert generic_map.heliographic_longitude == 0.
def test_units(generic_map):
    assert generic_map.spatial_units == ('arcsec', 'arcsec')
def test_coordinate_frame(aia171_test_map):
frame = aia171_test_map.coordinate_frame
assert isinstance(frame, sunpy.coordinates.Helioprojective)
assert frame.observer.lat == aia171_test_map.observer_coordinate.frame.lat
assert frame.observer.lon == aia171_test_map.observer_coordinate.frame.lon
assert frame.observer.radius == aia171_test_map.observer_coordinate.frame.radius
assert frame.obstime == aia171_test_map.date
def test_heliographic_longitude_crln(hmi_test_map):
assert hmi_test_map.heliographic_longitude == hmi_test_map.carrington_longitude - sunpy.coordinates.get_sun_L0(hmi_test_map.date)
# ==============================================================================
# Test Rotation WCS conversion
# ==============================================================================
def test_rotation_matrix_pci_j(generic_map):
np.testing.assert_allclose(generic_map.rotation_matrix, np.array([[0., -1.], [1., 0.]]))
def test_rotation_matrix_crota(aia171_test_map):
np.testing.assert_allclose(aia171_test_map.rotation_matrix,
np.array([[9.99999943e-01, -3.38820761e-04],
[3.38820761e-04, 9.99999943e-01]]))
def test_rotation_matrix_cd_cdelt():
data = np.ones([6, 6], dtype=np.float64)
header = {
'CRVAL1': 0,
'CRVAL2': 0,
'CRPIX1': 5,
'CRPIX2': 5,
'CDELT1': 10,
'CDELT2': 9,
'CD1_1': 0,
'CD1_2': -9,
'CD2_1': 10,
'CD2_2': 0,
'NAXIS1': 6,
'NAXIS2': 6,
'CUNIT1': 'arcsec',
'CUNIT2': 'arcsec'
}
cd_map = sunpy.map.Map((data, header))
np.testing.assert_allclose(cd_map.rotation_matrix, np.array([[0., -1.], [1., 0]]))
def test_rotation_matrix_cd_cdelt_square():
data = np.ones([6, 6], dtype=np.float64)
header = {
'CRVAL1': 0,
'CRVAL2': 0,
'CRPIX1': 5,
'CRPIX2': 5,
'CDELT1': 10,
'CDELT2': 10,
'CD1_1': 0,
'CD1_2': -10,
'CD2_1': 10,
'CD2_2': 0,
'NAXIS1': 6,
'NAXIS2': 6,
'CUNIT1': 'arcsec',
'CUNIT2': 'arcsec'
}
cd_map = sunpy.map.Map((data, header))
np.testing.assert_allclose(cd_map.rotation_matrix, np.array([[0., -1], [1., 0]]))
def test_swap_cd():
amap = sunpy.map.Map(os.path.join(testpath, 'swap_lv1_20140606_000113.fits'))
np.testing.assert_allclose(amap.rotation_matrix, np.array([[1., 0], [0, 1.]]))
def test_world_to_pixel(generic_map):
"""Make sure conversion from data units to pixels is internally
consistent"""
# Note: FITS pixels start from 1,1
test_pixel = generic_map.world_to_pixel(generic_map.reference_coordinate, origin=1)
assert_quantity_allclose(test_pixel, generic_map.reference_pixel)
def test_save(aia171_test_map, generic_map):
"""Tests the map save function"""
aiamap = aia171_test_map
afilename = tempfile.NamedTemporaryFile(suffix='fits').name
aiamap.save(afilename, filetype='fits', overwrite=True)
loaded_save = sunpy.map.Map(afilename)
assert isinstance(loaded_save, sunpy.map.sources.AIAMap)
assert loaded_save.meta == aiamap.meta
assert_quantity_allclose(loaded_save.data, aiamap.data)
def test_save_compressed(aia171_test_map, generic_map):
"""Tests the map save function"""
aiamap = aia171_test_map
afilename = tempfile.NamedTemporaryFile(suffix='fits').name
aiamap.save(afilename, filetype='fits', hdu_type=fits.CompImageHDU, overwrite=True)
loaded_save = sunpy.map.Map(afilename)
# We expect that round tripping to CompImageHDU will change the header and
# the data a little.
assert isinstance(loaded_save, sunpy.map.sources.AIAMap)
def test_default_shift():
"""Test that the default shift is zero"""
data = np.ones([6, 6], dtype=np.float64)
header = {
'CRVAL1': 0,
'CRVAL2': 0,
'CRPIX1': 5,
'CRPIX2': 5,
'CDELT1': 10,
'CDELT2': 9,
'CD1_1': 0,
'CD1_2': -9,
'CD2_1': 10,
'CD2_2': 0,
'NAXIS1': 6,
'NAXIS2': 6,
'CUNIT1': 'arcsec',
'CUNIT2': 'arcsec',
}
cd_map = sunpy.map.Map((data, header))
assert cd_map.shifted_value[0].value == 0
assert cd_map.shifted_value[1].value == 0
def test_shift_applied(generic_map):
"""Test that adding a shift actually updates the reference coordinate"""
original_reference_coord = (generic_map.reference_coordinate.Tx,
generic_map.reference_coordinate.Ty)
x_shift = 5 * u.arcsec
y_shift = 13 * u.arcsec
shifted_map = generic_map.shift(x_shift, y_shift)
assert shifted_map.reference_coordinate.Tx - x_shift == original_reference_coord[0]
assert shifted_map.reference_coordinate.Ty - y_shift == original_reference_coord[1]
crval1 = ((generic_map.meta.get('crval1') * generic_map.spatial_units[0] +
shifted_map.shifted_value[0]).to(shifted_map.spatial_units[0])).value
assert shifted_map.meta.get('crval1') == crval1
crval2 = ((generic_map.meta.get('crval2') * generic_map.spatial_units[1] +
shifted_map.shifted_value[1]).to(shifted_map.spatial_units[1])).value
assert shifted_map.meta.get('crval2') == crval2
def test_set_shift(generic_map):
"""Test that previously applied shift is stored in the shifted_value property"""
x_shift = 5 * u.arcsec
y_shift = 13 * u.arcsec
shifted_map = generic_map.shift(x_shift, y_shift)
resultant_shift = shifted_map.shifted_value
assert resultant_shift[0] == x_shift
assert resultant_shift[1] == y_shift
def test_shift_history(generic_map):
"""Test the shifted_value is added to a non-zero previous shift"""
x_shift1 = 5 * u.arcsec
y_shift1 = 13 * u.arcsec
shifted_map1 = generic_map.shift(x_shift1, y_shift1)
x_shift2 = -28.5 * u.arcsec
y_shift2 = 120 * u.arcsec
final_shifted_map = shifted_map1.shift(x_shift2, y_shift2)
resultant_shift = final_shifted_map.shifted_value
assert resultant_shift[0] == x_shift1 + x_shift2
assert resultant_shift[1] == y_shift1 + y_shift2
def test_submap(generic_map):
"""Check data and header information for a submap"""
width = generic_map.data.shape[1]
height = generic_map.data.shape[0]
# Create a submap of the top-right quadrant of the image
submap = generic_map.submap([width / 2., height / 2.] * u.pix, [width, height] * u.pix)
# Check to see if submap properties were updated properly
assert submap.reference_pixel.x.value == generic_map.meta['crpix1'] - width / 2.
assert submap.reference_pixel.y.value == generic_map.meta['crpix2'] - height / 2.
assert submap.data.shape[1] == width / 2.
assert submap.data.shape[0] == height / 2.
# Check to see if header was updated
assert submap.meta['naxis1'] == width / 2.
assert submap.meta['naxis2'] == height / 2.
# Check data
assert (generic_map.data[height // 2:height, width // 2:width] == submap.data).all()
resample_test_data = [('linear', (100, 200) * u.pixel), ('neighbor', (128, 256) * u.pixel),
('nearest', (512, 128) * u.pixel), ('spline', (200, 200) * u.pixel)]
@pytest.mark.parametrize('sample_method, new_dimensions', resample_test_data)
def test_resample_dimensions(generic_map, sample_method, new_dimensions):
"""Check that resampled map has expected dimensions."""
resampled_map = generic_map.resample(new_dimensions, method=sample_method)
assert resampled_map.dimensions[0] == new_dimensions[0]
assert resampled_map.dimensions[1] == new_dimensions[1]
@pytest.mark.parametrize('sample_method, new_dimensions', resample_test_data)
def test_resample_metadata(generic_map, sample_method, new_dimensions):
"""
Check that the resampled map has correctly adjusted metadata.
"""
resampled_map = generic_map.resample(new_dimensions, method=sample_method)
assert float(resampled_map.meta['cdelt1']) / generic_map.meta['cdelt1'] \
== float(generic_map.data.shape[1]) / resampled_map.data.shape[1]
assert float(resampled_map.meta['cdelt2']) / generic_map.meta['cdelt2'] \
== float(generic_map.data.shape[0]) / resampled_map.data.shape[0]
assert resampled_map.meta['crpix1'] == (resampled_map.data.shape[1] + 1) / 2.
assert resampled_map.meta['crpix2'] == (resampled_map.data.shape[0] + 1) / 2.
assert resampled_map.meta['crval1'] == generic_map.center.Tx.value
assert resampled_map.meta['crval2'] == generic_map.center.Ty.value
for key in generic_map.meta:
if key not in ('cdelt1', 'cdelt2', 'crpix1', 'crpix2', 'crval1', 'crval2'):
assert resampled_map.meta[key] == generic_map.meta[key]
def test_superpixel(aia171_test_map, aia171_test_map_with_mask):
dimensions = (2, 2) * u.pix
superpixel_map_sum = aia171_test_map.superpixel(dimensions)
assert_quantity_allclose(superpixel_map_sum.dimensions[1],
aia171_test_map.dimensions[1] / dimensions[1] * u.pix)
assert_quantity_allclose(superpixel_map_sum.dimensions[0],
aia171_test_map.dimensions[0] / dimensions[0] * u.pix)
assert_quantity_allclose(superpixel_map_sum.data[0][0],
(aia171_test_map.data[0][0] + aia171_test_map.data[0][1] +
aia171_test_map.data[1][0] + aia171_test_map.data[1][1]))
superpixel_map_avg = aia171_test_map.superpixel(dimensions, func=np.mean)
assert_quantity_allclose(superpixel_map_avg.dimensions[1],
aia171_test_map.dimensions[1] / dimensions[1] * u.pix)
assert_quantity_allclose(superpixel_map_avg.dimensions[0],
aia171_test_map.dimensions[0] / dimensions[0] * u.pix)
assert_quantity_allclose(superpixel_map_avg.data[0][0],
(aia171_test_map.data[0][0] + aia171_test_map.data[0][1] +
aia171_test_map.data[1][0] + aia171_test_map.data[1][1]) / 4.0)
# Test that the mask is respected
superpixel_map_sum = aia171_test_map_with_mask.superpixel(dimensions)
assert superpixel_map_sum.mask is not None
assert_quantity_allclose(superpixel_map_sum.mask.shape[0],
aia171_test_map.dimensions[1] / dimensions[1])
assert_quantity_allclose(superpixel_map_sum.mask.shape[1],
aia171_test_map.dimensions[0] / dimensions[0])
# Test that the offset is respected
superpixel_map_sum = aia171_test_map_with_mask.superpixel(dimensions, offset=(1, 1) * u.pix)
assert_quantity_allclose(superpixel_map_sum.dimensions[1],
aia171_test_map.dimensions[1] / dimensions[1] * u.pix - 1 * u.pix)
assert_quantity_allclose(superpixel_map_sum.dimensions[0],
aia171_test_map.dimensions[0] / dimensions[0] * u.pix - 1 * u.pix)
dimensions = (7, 9) * u.pix
superpixel_map_sum = aia171_test_map_with_mask.superpixel(dimensions, offset=(4, 4) * u.pix)
assert_quantity_allclose(
superpixel_map_sum.dimensions[0],
np.int((aia171_test_map.dimensions[0] / dimensions[0]).value) * u.pix - 1 * u.pix)
assert_quantity_allclose(
superpixel_map_sum.dimensions[1],
np.int((aia171_test_map.dimensions[1] / dimensions[1]).value) * u.pix - 1 * u.pix)
def calc_new_matrix(angle):
c = np.cos(np.deg2rad(angle))
s = np.sin(np.deg2rad(angle))
return np.array([[c, -s], [s, c]])
def test_rotate(aia171_test_map):
rotated_map_1 = aia171_test_map.rotate(20 * u.deg)
rotated_map_2 = rotated_map_1.rotate(20 * u.deg)
np.testing.assert_allclose(rotated_map_1.rotation_matrix,
np.dot(aia171_test_map.rotation_matrix, calc_new_matrix(20).T))
np.testing.assert_allclose(rotated_map_2.rotation_matrix,
np.dot(aia171_test_map.rotation_matrix, calc_new_matrix(40).T))
# Rotation of a map by a non-integral multiple of 90 degrees expands the map
# and assigns the value of 0 to corner pixels. This results in a reduction
# of the mean for a map of all non-negative values.
assert rotated_map_2.data.shape > rotated_map_1.data.shape > aia171_test_map.data.shape
np.testing.assert_allclose(rotated_map_1.data[0, 0], 0., atol=1e-7)
np.testing.assert_allclose(rotated_map_2.data[0, 0], 0., atol=1e-7)
assert rotated_map_2.mean() < rotated_map_1.mean() < aia171_test_map.mean()
rotated_map_3 = aia171_test_map.rotate(0 * u.deg, scale=1.5)
assert rotated_map_3.mean() > aia171_test_map.mean()
# Mean and std should be equal when angle of rotation is integral multiple
# of 90 degrees for a square map
rotated_map_4 = aia171_test_map.rotate(90 * u.deg, scale=1.5)
np.testing.assert_allclose(rotated_map_3.mean(), rotated_map_4.mean(), rtol=1e-3)
np.testing.assert_allclose(rotated_map_3.std(), rotated_map_4.std(), rtol=1e-3)
rotated_map_5 = aia171_test_map.rotate(180 * u.deg, scale=1.5)
np.testing.assert_allclose(rotated_map_3.mean(), rotated_map_5.mean(), rtol=1e-3)
np.testing.assert_allclose(rotated_map_3.std(), rotated_map_5.std(), rtol=2e-3)
# Rotation of a rectangular map by a large enough angle will change which dimension is larger
aia171_test_map_crop = aia171_test_map.submap(
SkyCoord(
[[0, 0], [1000, 400]] * u.arcsec, frame=aia171_test_map.coordinate_frame))
aia171_test_map_crop_rot = aia171_test_map_crop.rotate(60 * u.deg)
assert aia171_test_map_crop.data.shape[0] < aia171_test_map_crop.data.shape[1]
assert aia171_test_map_crop_rot.data.shape[0] > aia171_test_map_crop_rot.data.shape[1]
# Same test as above, to test the other direction
aia171_test_map_crop = aia171_test_map.submap(
SkyCoord(
[[0, 0], [400, 1000]] * u.arcsec, frame=aia171_test_map.coordinate_frame))
aia171_test_map_crop_rot = aia171_test_map_crop.rotate(60 * u.deg)
assert aia171_test_map_crop.data.shape[0] > aia171_test_map_crop.data.shape[1]
assert aia171_test_map_crop_rot.data.shape[0] < aia171_test_map_crop_rot.data.shape[1]
def test_rotate_pad_crpix(generic_map):
rotated_map = generic_map.rotate(30*u.deg)
# This tests that the reference pixel of the map is in the expected place.
assert rotated_map.data.shape != generic_map.data.shape
assert_quantity_allclose(u.Quantity(rotated_map.reference_pixel),
u.Quantity((6.049038105675565, 7.5490381056760265), u.pix))
def test_rotate_recenter(generic_map):
rotated_map = generic_map.rotate(20 * u.deg, recenter=True)
pixel_array_center = (np.flipud(rotated_map.data.shape) - 1) / 2.0
assert_quantity_allclose(
(pixel_array_center + 1) * u.pix, # FITS indexes from 1
u.Quantity(rotated_map.reference_pixel))
def test_rotate_crota_remove(aia171_test_map):
rot_map = aia171_test_map.rotate()
assert rot_map.meta.get('CROTA1', None) is None
assert rot_map.meta.get('CROTA2', None) is None
def test_rotate_scale_cdelt(generic_map):
rot_map = generic_map.rotate(scale=10.)
assert rot_map.meta['CDELT1'] == generic_map.meta['CDELT1'] / 10.
assert rot_map.meta['CDELT2'] == generic_map.meta['CDELT2'] / 10.
def test_rotate_new_matrix(generic_map):
# Rotate by CW90 to go from CCW 90 in generic map to CCW 180
rot_map = generic_map.rotate(rmatrix=np.array([[0, 1], [-1, 0]]))
np.testing.assert_allclose(rot_map.rotation_matrix, np.array([[-1, 0], [0, -1]]))
def test_rotate_rmatrix_angle(generic_map):
with pytest.raises(ValueError):
generic_map.rotate(angle=5, rmatrix=np.array([[1, 0], [0, 1]]))
def test_rotate_invalid_order(generic_map):
with pytest.raises(ValueError):
generic_map.rotate(order=6)
with pytest.raises(ValueError):
generic_map.rotate(order=-1)
def test_as_mpl_axes_aia171(aia171_test_map):
ax = plt.subplot(projection=aia171_test_map)
assert isinstance(ax, wcsaxes.WCSAxes)
# This test doesn't work, it seems that WCSAxes copies or changes the WCS
# object.
# assert ax.wcs is aia171_test_map.wcs
assert all([ct1 == ct2 for ct1, ct2 in zip(ax.wcs.wcs.ctype, aia171_test_map.wcs.wcs.ctype)])
# Map adds these attributes, so we use them to check.
assert hasattr(ax.wcs, 'heliographic_observer')
def test_pixel_to_world_no_projection(generic_map):
out = generic_map.pixel_to_world(*u.Quantity(generic_map.reference_pixel)+1*u.pix, origin=1)
assert_quantity_allclose(out.Tx, -10*u.arcsec)
assert_quantity_allclose(out.Ty, 10*u.arcsec)
def test_validate_meta(generic_map):
"""Check to see if_validate_meta displays an appropriate error"""
with warnings.catch_warnings(record=True) as w:
bad_header = {
'CRVAL1': 0,
'CRVAL2': 0,
'CRPIX1': 5,
'CRPIX2': 5,
'CDELT1': 10,
'CDELT2': 10,
'CUNIT1': 'ARCSEC',
'CUNIT2': 'ARCSEC',
'PC1_1': 0,
'PC1_2': -1,
'PC2_1': 1,
'PC2_2': 0,
'NAXIS1': 6,
'NAXIS2': 6,
'date-obs': '1970/01/01T00:00:00',
'obsrvtry': 'Foo',
'detector': 'bar',
'wavelnth': 10,
'waveunit': 'ANGSTROM'
}
bad_map = sunpy.map.Map((generic_map.data, bad_header))
assert 'waveunit'.upper() in str(w[0].message)
# Heliographic Map Tests
def test_hg_coord(heliographic_test_map):
assert heliographic_test_map.coordinate_system[0] == "CRLN-CAR"
assert heliographic_test_map.coordinate_system[1] == "CRLT-CAR"
assert isinstance(heliographic_test_map.coordinate_frame,
sunpy.coordinates.HeliographicCarrington)
def test_hg_pix_to_data(heliographic_test_map):
out = heliographic_test_map.pixel_to_world(180 * u.pix, 90 * u.pix)
assert isinstance(out, SkyCoord)
assert isinstance(out.frame, sunpy.coordinates.HeliographicCarrington)
assert_quantity_allclose(out.lon, 0 * u.deg)
assert_quantity_allclose(out.lat, 0 * u.deg)
def test_hg_data_to_pix(heliographic_test_map):
out = heliographic_test_map.world_to_pixel(
SkyCoord(
0 * u.deg, 0 * u.deg, frame=heliographic_test_map.coordinate_frame))
assert_quantity_allclose(out[0], 180 * u.pix)
assert_quantity_allclose(out[1], 90 * u.pix)
# Heliocentric Map Tests
def test_hc_warn():
data = np.ones([6, 6], dtype=np.float64)
header = {
'CRVAL1': 0,
'CRVAL2': 0,
'CRPIX1': 5,
'CRPIX2': 5,
'CDELT1': 10,
'CDELT2': 10,
'CUNIT1': 'km',
'CUNIT2': 'km',
'CTYPE1': 'SOLX ',
'CTYPE2': 'SOLY ',
'PC1_1': 0,
'PC1_2': -1,
'PC2_1': 1,
'PC2_2': 0,
'NAXIS1': 6,
'NAXIS2': 6,
'date-obs': '1970/01/01T00:00:00',
'obsrvtry': 'Foo',
'detector': 'bar',
'wavelnth': 10,
'waveunit': 'm'
}
with pytest.warns(UserWarning):
sunpy.map.Map((data, header))
# Dimension testing
def test_more_than_two_dimensions():
"""Checks to see if an appropriate error is raised when a FITS with more than two dimensions is
loaded. We need to load a >2-dim dataset with a TELESCOP header"""
    # Data crudely represents 4 Stokes parameters, 4 wavelengths with Y,X of 3 and 5.
bad_data = np.random.rand(4, 4, 3, 5)
hdr = fits.Header()
hdr['TELESCOP'] = 'XXX'
hdr['cunit1'] = 'arcsec'
hdr['cunit2'] = 'arcsec'
with pytest.warns(SunpyUserWarning, match='This file contains more than 2 dimensions.'):
bad_map = sunpy.map.Map(bad_data, hdr)
# Test fails if map.ndim > 2 and if the dimensions of the array are wrong.
    assert bad_map.ndim == 2
assert_quantity_allclose(bad_map.dimensions, (5, 3) * u.pix)
def test_missing_metadata_warnings():
# Checks that warnings for missing metadata are only raised once
with pytest.warns(Warning) as record:
header = {}
header['cunit1'] = 'arcsec'
header['cunit2'] = 'arcsec'
array_map = sunpy.map.Map(np.random.rand(20, 15), header)
array_map.peek()
# There should be 4 warnings for missing metadata
assert len([w for w in record if 'Missing metadata' in str(w)]) == 4
def test_fits_header(aia171_test_map):
assert isinstance(aia171_test_map.fits_header, fits.Header)
|
the-stack_106_31682 | #!/usr/bin/env python
import math
import time
from os.path import join, realpath
import sys; sys.path.insert(0, realpath(join(__file__, "../../../")))
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import (
OrderBookEvent,
OrderBookTradeEvent,
TradeType
)
import asyncio
import logging
from typing import (
Dict,
Optional,
List)
import unittest
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_tracker import OrderBookTrackerDataSourceType
from hummingbot.core.utils.async_utils import (
safe_ensure_future,
safe_gather,
)
from hummingbot.market.huobi.huobi_order_book_tracker import HuobiOrderBookTracker
class HuobiOrderBookTrackerUnitTest(unittest.TestCase):
order_book_tracker: Optional[HuobiOrderBookTracker] = None
events: List[OrderBookEvent] = [
OrderBookEvent.TradeEvent
]
trading_pairs: List[str] = [
"btcusdt",
"xrpusdt"
]
@classmethod
def setUpClass(cls):
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
cls.order_book_tracker: HuobiOrderBookTracker = HuobiOrderBookTracker(
data_source_type=OrderBookTrackerDataSourceType.EXCHANGE_API,
symbols=cls.trading_pairs
)
cls.order_book_tracker_task: asyncio.Task = safe_ensure_future(cls.order_book_tracker.start())
cls.ev_loop.run_until_complete(cls.wait_til_tracker_ready())
@classmethod
async def wait_til_tracker_ready(cls):
while True:
if len(cls.order_book_tracker.order_books) > 0:
print("Initialized real-time order books.")
return
await asyncio.sleep(1)
async def run_parallel_async(self, *tasks, timeout=None):
future: asyncio.Future = safe_ensure_future(safe_gather(*tasks))
timer = 0
while not future.done():
if timeout and timer > timeout:
raise Exception("Time out running parallel async task in tests.")
timer += 1
now = time.time()
next_iteration = now // 1.0 + 1
await asyncio.sleep(1.0)
return future.result()
def run_parallel(self, *tasks):
return self.ev_loop.run_until_complete(self.run_parallel_async(*tasks))
def setUp(self):
self.event_logger = EventLogger()
for event_tag in self.events:
for trading_pair, order_book in self.order_book_tracker.order_books.items():
order_book.add_listener(event_tag, self.event_logger)
def test_order_book_trade_event_emission(self):
"""
Test if order book tracker is able to retrieve order book trade message from exchange and
emit order book trade events after correctly parsing the trade messages
"""
self.run_parallel(self.event_logger.wait_for(OrderBookTradeEvent))
for ob_trade_event in self.event_logger.event_log:
self.assertTrue(type(ob_trade_event) == OrderBookTradeEvent)
self.assertTrue(ob_trade_event.symbol in self.trading_pairs)
self.assertTrue(type(ob_trade_event.timestamp) in [float, int])
self.assertTrue(type(ob_trade_event.amount) == float)
self.assertTrue(type(ob_trade_event.price) == float)
self.assertTrue(type(ob_trade_event.type) == TradeType)
self.assertTrue(math.ceil(math.log10(ob_trade_event.timestamp)) == 10)
self.assertTrue(ob_trade_event.amount > 0)
self.assertTrue(ob_trade_event.price > 0)
def test_tracker_integrity(self):
# Wait 5 seconds to process some diffs.
self.ev_loop.run_until_complete(asyncio.sleep(5.0))
order_books: Dict[str, OrderBook] = self.order_book_tracker.order_books
btcusdt_book: OrderBook = order_books["btcusdt"]
xrpusdt_book: OrderBook = order_books["xrpusdt"]
# print(btcusdt_book.snapshot)
# print(xrpusdt_book.snapshot)
self.assertGreaterEqual(btcusdt_book.get_price_for_volume(True, 10).result_price,
btcusdt_book.get_price(True))
self.assertLessEqual(btcusdt_book.get_price_for_volume(False, 10).result_price,
btcusdt_book.get_price(False))
self.assertGreaterEqual(xrpusdt_book.get_price_for_volume(True, 10000).result_price,
xrpusdt_book.get_price(True))
self.assertLessEqual(xrpusdt_book.get_price_for_volume(False, 10000).result_price,
xrpusdt_book.get_price(False))
def main():
logging.basicConfig(level=logging.INFO)
unittest.main()
if __name__ == "__main__":
main()
|
the-stack_106_31683 | # Copyright 2019 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
import nova.privsep.libvirt
from nova import test
@ddt.ddt
class PrivsepLibvirtMountTestCase(test.NoDBTestCase):
QB_BINARY = "mount.quobyte"
QB_FIXED_OPT_1 = "--disable-xattrs"
FAKE_VOLUME = "fake_volume"
FAKE_MOUNT_BASE = "/fake/mount/base"
def setUp(self):
super(PrivsepLibvirtMountTestCase, self).setUp()
self.useFixture(test.nova_fixtures.PrivsepFixture())
@ddt.data(None, "/FAKE/CFG/PATH.cfg")
@mock.patch('oslo_concurrency.processutils.execute')
def test_systemd_run_qb_mount(self, cfg_file, mock_execute):
sysd_bin = "systemd-run"
sysd_opt_1 = "--scope"
nova.privsep.libvirt.systemd_run_qb_mount(self.FAKE_VOLUME,
self.FAKE_MOUNT_BASE,
cfg_file=cfg_file)
if cfg_file:
mock_execute.assert_called_once_with(sysd_bin, sysd_opt_1,
self.QB_BINARY,
self.QB_FIXED_OPT_1,
self.FAKE_VOLUME,
self.FAKE_MOUNT_BASE,
"-c",
cfg_file)
else:
mock_execute.assert_called_once_with(sysd_bin, sysd_opt_1,
self.QB_BINARY,
self.QB_FIXED_OPT_1,
self.FAKE_VOLUME,
self.FAKE_MOUNT_BASE)
@ddt.data(None, "/FAKE/CFG/PATH.cfg")
@mock.patch('oslo_concurrency.processutils.execute')
def test_unprivileged_qb_mount(self, cfg_file, mock_execute):
nova.privsep.libvirt.unprivileged_qb_mount(self.FAKE_VOLUME,
self.FAKE_MOUNT_BASE,
cfg_file=cfg_file)
if cfg_file:
mock_execute.assert_called_once_with(self.QB_BINARY,
self.QB_FIXED_OPT_1,
self.FAKE_VOLUME,
self.FAKE_MOUNT_BASE,
"-c",
cfg_file)
else:
mock_execute.assert_called_once_with(self.QB_BINARY,
self.QB_FIXED_OPT_1,
self.FAKE_VOLUME,
self.FAKE_MOUNT_BASE)
|
the-stack_106_31684 | #!/usr/bin/env python3
# Copyright 2019-2022 Jean-Luc Vay, Maxence Thevenet, Remi Lehe, Axel Huebl
#
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
#
# This is a script that analyses the simulation results from
# the script `inputs.multi.rt`. This simulates a 3D periodic plasma wave.
# The electric field in the simulation is given (in theory) by:
# $$ E_x = \epsilon \,\frac{m_e c^2 k_x}{q_e}\sin(k_x x)\cos(k_y y)\cos(k_z z)\sin( \omega_p t)$$
# $$ E_y = \epsilon \,\frac{m_e c^2 k_y}{q_e}\cos(k_x x)\sin(k_y y)\cos(k_z z)\sin( \omega_p t)$$
# $$ E_z = \epsilon \,\frac{m_e c^2 k_z}{q_e}\cos(k_x x)\cos(k_y y)\sin(k_z z)\sin( \omega_p t)$$
import os
import re
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import yt
yt.funcs.mylog.setLevel(50)
import numpy as np
from scipy.constants import c, e, epsilon_0, m_e
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
# this will be the name of the plot file
fn = sys.argv[1]
# Parse test name and check if current correction (psatd.current_correction=1) is applied
current_correction = True if re.search( 'current_correction', fn ) else False
# Parse test name and check if Vay current deposition (algo.current_deposition=vay) is used
vay_deposition = True if re.search( 'Vay_deposition', fn ) else False
# Parse test name and check if div(E)/div(B) cleaning (warpx.do_div<e,b>_cleaning=1) is used
div_cleaning = True if re.search('div_cleaning', fn) else False
# Parameters (these parameters must match the parameters in `inputs.multi.rt`)
epsilon = 0.01
n = 4.e24
n_osc_x = 2
n_osc_y = 2
n_osc_z = 2
lo = [-20.e-6, -20.e-6, -20.e-6]
hi = [ 20.e-6, 20.e-6, 20.e-6]
Ncell = [64, 64, 64]
# Wave vector of the wave
kx = 2.*np.pi*n_osc_x/(hi[0]-lo[0])
ky = 2.*np.pi*n_osc_y/(hi[1]-lo[1])
kz = 2.*np.pi*n_osc_z/(hi[2]-lo[2])
# Plasma frequency
wp = np.sqrt((n*e**2)/(m_e*epsilon_0))
k = {'Ex':kx, 'Ey':ky, 'Ez':kz}
cos = {'Ex': (0,1,1), 'Ey':(1,0,1), 'Ez':(1,1,0)}
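# In `cos`, a 1 marks a cosine factor and a 0 a sine factor for the (x, y, z)
# dependence of each field component, matching the analytic expressions in the
# header comment above (e.g. Ex ~ sin(kx x) cos(ky y) cos(kz z)).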
def get_contribution( is_cos, k, idim ):
du = (hi[idim]-lo[idim])/Ncell[idim]
u = lo[idim] + du*( 0.5 + np.arange(Ncell[idim]) )
if is_cos[idim] == 1:
return( np.cos(k*u) )
else:
return( np.sin(k*u) )
def get_theoretical_field( field, t ):
amplitude = epsilon * (m_e*c**2*k[field])/e * np.sin(wp*t)
cos_flag = cos[field]
x_contribution = get_contribution( cos_flag, kx, 0 )
y_contribution = get_contribution( cos_flag, ky, 1 )
z_contribution = get_contribution( cos_flag, kz, 2 )
E = amplitude * x_contribution[:, np.newaxis, np.newaxis] \
* y_contribution[np.newaxis, :, np.newaxis] \
* z_contribution[np.newaxis, np.newaxis, :]
return( E )
# Read the file
ds = yt.load(fn)
# Check that the particle selective output worked:
species = 'electrons'
print('ds.field_list', ds.field_list)
for field in ['particle_weight',
'particle_momentum_x']:
print('assert that this is in ds.field_list', (species, field))
assert (species, field) in ds.field_list
for field in ['particle_momentum_y',
'particle_momentum_z']:
print('assert that this is NOT in ds.field_list', (species, field))
assert (species, field) not in ds.field_list
species = 'positrons'
for field in ['particle_momentum_x',
'particle_momentum_y']:
print('assert that this is NOT in ds.field_list', (species, field))
assert (species, field) not in ds.field_list
t0 = ds.current_time.to_value()
data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge,
dims=ds.domain_dimensions)
# Check the validity of the fields
error_rel = 0
for field in ['Ex', 'Ey', 'Ez']:
E_sim = data[('mesh',field)].to_ndarray()
E_th = get_theoretical_field(field, t0)
max_error = abs(E_sim-E_th).max()/abs(E_th).max()
print('%s: Max error: %.2e' %(field,max_error))
error_rel = max( error_rel, max_error )
# Plot the last field from the loop (Ez at iteration 40)
plt.subplot2grid( (1,2), (0,0) )
plt.imshow( E_sim[:,:,Ncell[2]//2] )
plt.colorbar()
plt.title('Ez, last iteration\n(simulation)')
plt.subplot2grid( (1,2), (0,1) )
plt.imshow( E_th[:,:,Ncell[2]//2] )
plt.colorbar()
plt.title('Ez, last iteration\n(theory)')
plt.tight_layout()
plt.savefig('langmuir_multi_analysis.png')
tolerance_rel = 0.15
print("error_rel : " + str(error_rel))
print("tolerance_rel: " + str(tolerance_rel))
assert( error_rel < tolerance_rel )
# Check relative L-infinity spatial norm of rho/epsilon_0 - div(E) when
# current correction (psatd.do_current_correction=1) is applied or when
# Vay current deposition (algo.current_deposition=vay) is used
if current_correction or vay_deposition:
rho = data[('boxlib','rho')].to_ndarray()
divE = data[('boxlib','divE')].to_ndarray()
error_rel = np.amax( np.abs( divE - rho/epsilon_0 ) ) / np.amax( np.abs( rho/epsilon_0 ) )
tolerance = 1.e-9
print("Check charge conservation:")
print("error_rel = {}".format(error_rel))
print("tolerance = {}".format(tolerance))
assert( error_rel < tolerance )
if div_cleaning:
ds_old = yt.load('Langmuir_multi_psatd_div_cleaning_plt000038')
ds_mid = yt.load('Langmuir_multi_psatd_div_cleaning_plt000039')
ds_new = yt.load(fn) # this is the last plotfile
ad_old = ds_old.covering_grid(level = 0, left_edge = ds_old.domain_left_edge, dims = ds_old.domain_dimensions)
ad_mid = ds_mid.covering_grid(level = 0, left_edge = ds_mid.domain_left_edge, dims = ds_mid.domain_dimensions)
ad_new = ds_new.covering_grid(level = 0, left_edge = ds_new.domain_left_edge, dims = ds_new.domain_dimensions)
rho = ad_mid['rho'].v.squeeze()
divE = ad_mid['divE'].v.squeeze()
F_old = ad_old['F'].v.squeeze()
F_new = ad_new['F'].v.squeeze()
# Check max norm of error on dF/dt = div(E) - rho/epsilon_0
# (the time interval between the old and new data is 2*dt)
dt = 1.203645751e-15
x = F_new - F_old
y = (divE - rho/epsilon_0) * 2 * dt
error_rel = np.amax(np.abs(x - y)) / np.amax(np.abs(y))
tolerance = 1e-2
print("Check div(E) cleaning:")
print("error_rel = {}".format(error_rel))
print("tolerance = {}".format(tolerance))
assert(error_rel < tolerance)
test_name = os.path.split(os.getcwd())[1]
if re.search( 'single_precision', fn ):
checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3)
else:
checksumAPI.evaluate_checksum(test_name, fn)
|
the-stack_106_31685 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://github.com/dandavison/iterm2-dwim/blob/master/iterm2_dwim/parsers/parsers.py
__version__ = "0.0.3"
import sys, os, json, re
global cli_map
cli_map = {
'java': 'idea',
'rb': 'mine',
'ruby': 'mine',
'py': 'charm',
'python': 'charm',
'kt': 'studio',
'txt': os.getenv('EDITOR', ''),
}
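# The mapping above can be overridden through '~/.iterm_file_handler.json'
# (created by the -O/override flag below). A minimal override file could look like
# (values shown are hypothetical):
#
#   {"py": "code", "md": "open"}
#
# where keys are file extensions (or shebang interpreters) and values are launcher
# commands expected in /usr/local/bin; anything not found there falls back to
# /usr/bin/open.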
def main():
_file, _line = _sanitize_params()
_run([_get_cli(_file), _file, _line])
def _sanitize_params():
global cli_map
config_file = os.path.expanduser('~/.iterm_file_handler.json')
if any(_arg in ['-O', 'override'] for _arg in sys.argv):
with open(config_file, "w+") as jf:
json.dump(cli_map, jf)
print("Config file created at '{}', Please make the changes".format(
config_file))
sys.exit()
if any(_arg in ['-h', 'help'] for _arg in sys.argv):
        # TODO: use a CLI parsing library for these options
        print(r'''
        -h, help -- print help
        -O, override -- creates json file at '~/.iterm_file_handler.json' to override the default file handlers
        Set `/usr/local/bin/itfh "\1" "\2" "\5" 'test'` in Iterm2 settings > profile > Advanced > Run Command
        to activate itfh
        ''')
sys.exit()
if os.path.exists(config_file):
with open(config_file, 'r') as jf:
cli_map.update(json.load(jf))
if ':' in sys.argv[1]:
file, line = sys.argv[1].split(':')
elif sys.argv[2].isnumeric() and os.path.exists(sys.argv[1]):
file, line = sys.argv[1], sys.argv[2]
elif 'line' in sys.argv[4]:
# for ruby pry session
try:
            file, line = sys.argv[1], re.search(r'line.* (\d+)', sys.argv[4]).group(1)
        except Exception:
file, line = sys.argv[1], '1'
else:
file, line = sys.argv[1:3]
if not file.startswith('/'):
""" Appending PWD if file path is not absolute"""
file = sys.argv[3] + '/' + file
if len(line) < 1:
line = '1'
return file, line
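# Illustrative walk-through (hypothetical arguments, not prescribed by the tool):
# an Iterm2 hook call such as `itfh "app/models/user.rb:42" "" "/project" ""`
# takes the first branch above (':' in sys.argv[1]), giving file='app/models/user.rb'
# and line='42'; since the path is relative it is then prefixed with sys.argv[3],
# so the handler ends up opening '/project/app/models/user.rb' at line 42.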
def _get_extention(file):
if '.' in file:
return file.split('.')[-1]
elif os.path.exists(file.split(':')[0]):
# getting file type from shebang
with open(file.split(':')[0]) as f:
first_line = f.readline().rstrip()
# _log('first_line --> ' + first_line)
file_type = first_line.split(' ')[-1]
# _log('file_type --> ' + file_type)
return file_type
    raise RuntimeError("File extension not found in '{}'".format(file))
def _get_cli(for_file):
    global cli_map
    name = ''
    cli = '/usr/bin/open'
    try:
        from distutils.spawn import find_executable as which
        name = cli_map.get(_get_extention(for_file), '')
        cli = (which(name, path='/usr/local/bin') or '/usr/bin/open')
    except Exception as e:
        _log(e)
    _log('cli app is ---> ' + name)
    _log('cli path ---> ' + cli)
    return cli
def _run(cmd):
    import subprocess
    args = sys.argv[1:]
    if '/usr/bin/open' in cmd:
        del cmd[-1]
        out = subprocess.call(cmd)
    elif 'mine' in cmd[0]:
        import requests
        out = requests.get(f"http://localhost:63342/api/file/{cmd[1]}:{cmd[2]}").status_code
    else:
        # converting [cli, file_name.rb, 23] into [cli, --line, 23, file_name.rb]
        cmd[-2:] = ['--line', cmd[-2:][1], cmd[-2:][0]]
        out = subprocess.call(cmd)
    if (out not in (0, 200)) or ('test' in args):
        _log('Command created --> ' + ' '.join(cmd))
        _log('Input passed --> ' + str({k + 1: v for k, v in enumerate(args)}))
        _log('Command for debug --> ' + "/usr/local/bin/itfh " + ' '.join(['"{0}"'.format(x) for x in args]))
        _log('')
        # _log('Input passed --> ' + str(args))
def _log(msg):
import datetime
log_file = os.path.expanduser('~/.iterm_file_handler.log')
time_stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
with open(log_file, 'a') as f:
f.write("[{}] {} \n".format(time_stamp, msg))
if __name__ == '__main__':
main() |
the-stack_106_31687 | import pytest
from django.urls import resolve, reverse
from termplanner.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
|
the-stack_106_31689 | # ***************************************************************
# Copyright (c) 2020 Jittor. Authors:
# Guowei Yang <[email protected]>
# Dun Liang <[email protected]>.
# All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
skip_this_test = False
try:
jt.dirty_fix_pytorch_runtime_error()
import torch
from torch.autograd import Variable
except Exception:
torch = None
skip_this_test = True
@unittest.skipIf(skip_this_test, "No Torch found")
class TestCumprod(unittest.TestCase):
def test_cumprod_cpu(self):
for i in range(1,6):
for j in range(i):
print("test", i, j)
x = np.random.rand(*((10,)*i))
x_jt = jt.array(x)
y_jt = jt.cumprod(x_jt, j).sqr()
g_jt = jt.grad(y_jt.sum(), x_jt)
x_tc = Variable(torch.from_numpy(x), requires_grad=True)
y_tc = torch.cumprod(x_tc, j)**2
y_tc.sum().backward()
g_tc = x_tc.grad
assert np.allclose(y_jt.numpy(), y_tc.data)
np.testing.assert_allclose(g_jt.numpy(), g_tc.data, atol=1e-5)
@unittest.skipIf(not jt.compiler.has_cuda, "No CUDA found")
@jt.flag_scope(use_cuda=1)
def test_cumprod_gpu(self):
self.test_cumprod_cpu()
if __name__ == "__main__":
unittest.main() |
the-stack_106_31693 | import time
from datetime import datetime
import discord
from discord import Embed
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.context import SlashContext
import helpers
class Utilities(commands.Cog):
"""
General Utilities
"""
@cog_ext.cog_slash(name="ping")
async def _ping(self, ctx: SlashContext):
"""
Status check
"""
start_time = time.time()
        message = await ctx.send('pong. `DWSP latency: ' + str(round(ctx.bot.latency * 1000)) + 'ms`')
end_time = time.time()
await message.edit(content='pong. `DWSP latency: ' + str(round(ctx.bot.latency * 1000)) + 'ms` ' +
'`Response time: ' + str(int((end_time - start_time) * 1000)) + 'ms`')
@cog_ext.cog_slash(name="source")
async def _source(self, ctx: SlashContext):
"""
Print a link to the source code
"""
await ctx.send(content='Created by `Joel Adams`\n'
'Further development by `Cerys Lewis`\n'
'https://github.com/JoelLucaAdams/RedditBot')
@cog_ext.cog_slash(name="reddit")
async def _reddit(self, ctx: SlashContext, url: str):
"""
replies with a video from the reddit link
"""
await ctx.defer()
success, json_payload = helpers.get_reddit_json_payload(url)
if success is False:
await ctx.send(content=json_payload.get('error-message'))
return
embed = Embed(
description=f'[{json_payload.get("subreddit")} - {json_payload.get("title")}]({url})',
color=discord.Color.from_rgb(255, 69, 0),
timestamp=datetime.utcnow()
)
embed.set_footer(
icon_url=ctx.author.avatar_url,
text=f'Sent by {ctx.author.display_name}'
)
is_video, discord_file = helpers.get_video_url_from_payload(json_payload.get('video'), json_payload.get('audio'))
if is_video is True:
print(discord_file)
await ctx.send(embed=embed, file=discord_file)
else:
temp_media = json_payload.get('image')
embed.set_image(url=temp_media)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Utilities(bot))
|
the-stack_106_31694 | import tensorflow as tf
from utility.tf_utils import tensor2numpy
from core.tf_config import build
from core.decorator import override
from core.mixin import Memory
from algo.ppo.base import PPOBase, collect
def get_data_format(*, env, batch_size, sample_size=None,
store_state=False, state_size=None, **kwargs):
obs_dtype = tf.uint8 if len(env.obs_shape) == 3 else tf.float32
action_dtype = tf.int32 if env.is_action_discrete else tf.float32
data_format = dict(
obs=((None, sample_size, *env.obs_shape), obs_dtype),
action=((None, sample_size, *env.action_shape), action_dtype),
value=((None, sample_size), tf.float32),
traj_ret=((None, sample_size), tf.float32),
advantage=((None, sample_size), tf.float32),
logpi=((None, sample_size), tf.float32),
mask=((None, sample_size), tf.float32),
)
if store_state:
dtype = tf.keras.mixed_precision.experimental.global_policy().compute_dtype
data_format.update({
k: ((batch_size, v), dtype)
for k, v in state_size._asdict().items()
})
return data_format
class Agent(Memory, PPOBase):
""" Initialization """
@override(PPOBase)
def _add_attributes(self, env, dataset):
super()._add_attributes(env, dataset)
self._setup_memory_state_record()
@override(PPOBase)
def _build_train(self, env):
# Explicitly instantiate tf.function to avoid unintended retracing
TensorSpecs = dict(
obs=((self._sample_size, *env.obs_shape), env.obs_dtype, 'obs'),
action=((self._sample_size, *env.action_shape), env.action_dtype, 'action'),
value=((self._sample_size,), tf.float32, 'value'),
traj_ret=((self._sample_size,), tf.float32, 'traj_ret'),
advantage=((self._sample_size,), tf.float32, 'advantage'),
logpi=((self._sample_size,), tf.float32, 'logpi'),
mask=((self._sample_size,), tf.float32, 'mask'),
)
if self._store_state:
state_type = type(self.model.state_size)
TensorSpecs['state'] = state_type(*[((sz, ), self._dtype, name)
for name, sz in self.model.state_size._asdict().items()])
if self._additional_rnn_inputs:
if 'prev_action' in self._additional_rnn_inputs:
TensorSpecs['prev_action'] = (
(self._sample_size, *env.action_shape),
env.action_dtype, 'prev_action')
if 'prev_reward' in self._additional_rnn_inputs:
TensorSpecs['prev_reward'] = (
                    (self._sample_size,), self._dtype, 'prev_reward') # this reward should be unnormalized
self.train = build(self._learn, TensorSpecs)
""" Call """
# @override(PPOBase)
def _process_input(self, env_output, evaluation):
obs, kwargs = super()._process_input(env_output, evaluation)
mask = 1. - env_output.reset
kwargs = self._add_memory_state_to_kwargs(obs, mask, kwargs=kwargs)
return obs, kwargs
# @override(PPOBase)
def _process_output(self, obs, kwargs, out, evaluation):
out = self._add_tensors_to_terms(obs, kwargs, out, evaluation)
out = super()._process_output(obs, kwargs, out, evaluation)
out = self._add_non_tensors_to_terms(out, kwargs, evaluation)
return out
""" PPO methods """
# @override(PPOBase)
def compute_value(self, obs=None, state=None, mask=None, prev_reward=None, return_state=False):
# be sure obs is normalized if obs normalization is required
if obs is None:
obs = self._env_output.obs
if state is None:
state = self._state
if mask is None:
mask = 1. - self._env_output.reset
kwargs = self._add_memory_state_to_kwargs(
obs, mask, state=state, prev_reward=prev_reward)
kwargs['return_state'] = return_state
out = self.model.compute_value(obs, **kwargs)
return tensor2numpy(out)
|
the-stack_106_31695 | """
This module contains external toolkit wrappers that are required by the
main offpele modules.
"""
import importlib
from distutils.spawn import find_executable
import tempfile
import os
import subprocess
from collections import defaultdict
from pathlib import Path
from copy import deepcopy
import numpy as np
from simtk import unit
from offpele.utils import temporary_cd, get_data_file_path
class ToolkitUnavailableException(Exception):
"""The requested toolkit is unavailable."""
pass
class ChargeCalculationError(Exception):
"""An external error when calculating charges"""
pass
class ChargeMethodUnavailableError(Exception):
"""A toolkit does not support the requested partial_charge_method combination"""
pass
class ToolkitWrapper(object):
"""
Toolkit wrapper base class.
"""
_is_available = None
_toolkit_name = None
@property
def toolkit_name(self):
"""
The name of the toolkit.
Returns
-------
toolkit_name : str
The name of this ToolkitWrapper object
"""
return self._toolkit_name
@staticmethod
def is_available():
"""
Check whether the corresponding toolkit can be imported
Returns
-------
is_installed : bool
True if corresponding toolkit is installed, False otherwise.
"""
        raise NotImplementedError
class RDKitToolkitWrapper(ToolkitWrapper):
"""
RDKitToolkitWrapper class.
"""
_toolkit_name = 'RDKit Toolkit'
def __init__(self):
"""
It initializes a RDKitToolkitWrapper object.
"""
super().__init__()
if not self.is_available():
raise ToolkitUnavailableException(
'The required toolkit {} is not '.format(self.toolkit_name)
+ 'available.')
@staticmethod
def is_available():
"""
Check whether the RDKit toolkit can be imported
Returns
-------
is_installed : bool
True if RDKit is installed, False otherwise.
"""
try:
importlib.import_module('rdkit', 'Chem')
return True
except ImportError:
return False
def from_pdb(self, path):
"""
It initializes an RDKit's Molecule object from a PDB file.
Parameters
----------
path : str
The path to the molecule's PDB file
Returns
-------
molecule : an rdkit.Chem.rdchem.Mol object
The RDKit's Molecule object
"""
from rdkit import Chem
return Chem.rdmolfiles.MolFromPDBFile(path, removeHs=False)
def from_smiles(self, smiles):
"""
It initializes an RDKit's Molecule object from a SMILES tag.
Parameters
----------
smiles : str
The SMILES tag to construct the molecule structure with.
Returns
-------
molecule : an rdkit.Chem.rdchem.Mol object
The RDKit's Molecule object
"""
from rdkit.Chem import AllChem as Chem
molecule = Chem.MolFromSmiles(smiles)
# Add hydrogens to molecule
molecule = Chem.AddHs(molecule)
# Generate 3D coordinates
Chem.EmbedMolecule(molecule)
return molecule
def assign_connectivity_from_template(self, molecule):
"""
It assigns the connectivity to an RDKit molecule according to the
connectivity from an RDKit connectivity template.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
"""
from rdkit.Chem import AllChem
if molecule.connectivity_template is None:
raise ValueError('A connectivity template must be previously '
+ 'assigned to the molecule')
rdkit_molecule = molecule.rdkit_molecule
rdkit_molecule = AllChem.AssignBondOrdersFromTemplate(
molecule.connectivity_template, rdkit_molecule)
molecule._rdkit_molecule = rdkit_molecule
def assign_stereochemistry_from_3D(self, molecule):
"""
It assigns the stereochemistry to an RDKit molecule according to the
3D coordinates in the PDB structure.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
"""
from rdkit import Chem
rdkit_molecule = molecule.rdkit_molecule
Chem.rdmolops.AssignStereochemistryFrom3D(rdkit_molecule)
def set_conformer(self, molecule, conformer):
"""
It sets a new conformation to the molecule.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
conformer : an RDKit.Chem.rdchem.Conformer object
The conformer to set to the molecule
"""
rdkit_molecule = molecule.rdkit_molecule
# Remove previous conformer
rdkit_molecule.RemoveAllConformers()
# Add current conformer
rdkit_molecule.AddConformer(conformer, assignId=True)
def get_residue_name(self, molecule):
"""
It returns the name of the residue according to the RDKit molecule
object.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
Returns
-------
residue_name : str
The name of the residue
"""
rdkit_molecule = molecule.rdkit_molecule
first_atom = list(rdkit_molecule.GetAtoms())[0]
# Catch a None return
try:
residue_name = first_atom.GetPDBResidueInfo().GetResidueName()
except AttributeError:
residue_name = None
return residue_name
def get_atom_names(self, molecule):
"""
It returns the ordered list of atom names according to the
RDKit molecule object. In case no atom names are available
        (non-PDB source), it assigns a name to each atom considering
the element and an index obtained from the total number of
occurrences of each element.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
Returns
-------
        atom_names : list[str]
The list of atom names
"""
rdkit_molecule = molecule.rdkit_molecule
atom_names = list()
occurrences = dict()
for atom in rdkit_molecule.GetAtoms():
pdb_info = atom.GetPDBResidueInfo()
if pdb_info is not None:
atom_names.append(pdb_info.GetName())
else:
element = atom.GetSymbol()
occurrences[element] = occurrences.get(element, 0) + 1
atom_names.append('{:^4}'.format(str(element)
+ str(occurrences[element])))
return atom_names
def to_pdb_file(self, molecule, path):
"""
It writes the RDKit molecule to a PDB file.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
path : str
Path to write to
"""
from rdkit import Chem
assert Path(path).suffix == '.pdb', 'Wrong extension'
rdkit_molecule = molecule.rdkit_molecule
pdb_block = Chem.rdmolfiles.MolToPDBBlock(rdkit_molecule)
names = molecule.get_pdb_atom_names()
tag = molecule.tag
renamed_pdb_block = ''
atom_counter = 0
for line in pdb_block.split('\n'):
if line.startswith('HETATM'):
renamed_pdb_block += line[:12] + names[atom_counter] \
+ ' ' + tag + line[20:] + '\n'
atom_counter += 1
else:
renamed_pdb_block += line + '\n'
with open(path, 'w') as f:
f.write(renamed_pdb_block)
def to_sdf_file(self, molecule, path):
"""
It writes the RDKit molecule to an sdf file.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
path : str
Path to write to
"""
from rdkit import Chem
assert Path(path).suffix == '.sdf', 'Wrong extension'
rdkit_molecule = molecule.rdkit_molecule
with open(path, 'w') as f:
writer = Chem.SDWriter(f)
writer.write(rdkit_molecule)
writer.close()
def to_xyz_file(self, molecule, path):
"""
It writes the RDKit molecule to an xyz file.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
path : str
Path to write to
"""
from rdkit import Chem
assert Path(path).suffix == '.xyz', 'Wrong extension'
rdkit_molecule = molecule.rdkit_molecule
Chem.MolToXYZFile(rdkit_molecule, path)
def get_atom_ids_with_rotatable_bonds(self, molecule):
"""
It returns the atom ids with rotatable bonds according to the
RDKit molecule.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
Returns
-------
rot_bonds_atom_ids : tuple[tuple[int, int]]
The set of atom id pairs that belong to rotatable bonds
"""
from rdkit import Chem
rdkit_molecule = deepcopy(molecule.rdkit_molecule)
rot_bonds_atom_ids = set([
frozenset(atom_pair) for atom_pair in
rdkit_molecule.GetSubstructMatches(
Chem.MolFromSmarts('[!$([NH]!@C(=O))&!D1&!$(*#*)]-&!@[!$([NH]!@C(=O))&!D1&!$(*#*)]'))])
# Include missing rotatable bonds for amide groups
for atom_pair in [frozenset(atom_pair) for atom_pair in
rdkit_molecule.GetSubstructMatches(
Chem.MolFromSmarts('[$(N!@C(=O))]-&!@[!$(C(=O))&!D1&!$(*#*)]'))]:
rot_bonds_atom_ids.add(atom_pair)
# Remove bonds to terminal -CH3
        # TODO: this is not working, fix it!
if molecule.exclude_terminal_rotamers:
terminal_bonds = set([
frozenset(atom_pair) for atom_pair in
rdkit_molecule.GetSubstructMatches(
Chem.MolFromSmarts('*-&!@[$([C;H3;X4]),$([N;H2;X3]),$([N;H3;X4]),$([O;H1;X2])]'))
])
rot_bonds_atom_ids = rot_bonds_atom_ids.difference(terminal_bonds)
return list(rot_bonds_atom_ids)
def get_coordinates(self, molecule):
"""
It returns the 3D coordinates of all atoms in the RDKit molecule.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
Returns
-------
coordinates : numpy.ndarray
The array of 3D coordinates of all the atoms in the molecule
"""
rdkit_molecule = molecule.rdkit_molecule
conformer = rdkit_molecule.GetConformer()
return conformer.GetPositions()
def get_2D_representation(self, molecule):
"""
It returns the 2D representation of the RDKit molecule.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
Returns
-------
representation_2D : an RDKit.molecule object
            It is an RDKit molecule with an embedded 2D representation
"""
from rdkit.Chem import AllChem
rdkit_molecule = molecule.rdkit_molecule
representation_2D = deepcopy(rdkit_molecule)
AllChem.Compute2DCoords(representation_2D)
return representation_2D
class AmberToolkitWrapper(ToolkitWrapper):
"""
AmberToolkitWrapper class.
"""
_toolkit_name = 'Amber Toolkit'
def __init__(self):
"""
It initializes a AmberToolkitWrapper object.
"""
super().__init__()
if not self.is_available():
raise ToolkitUnavailableException(
'The required toolkit {} is not '.format(self.toolkit_name)
+ 'available.')
self._rdkit_toolkit_wrapper = RDKitToolkitWrapper()
@staticmethod
def is_available():
"""
Check whether the AmberTools toolkit is installed
Returns
-------
is_installed : bool
True if AmberTools is installed, False otherwise.
"""
ANTECHAMBER_PATH = find_executable("antechamber")
if ANTECHAMBER_PATH is None:
return False
if not(RDKitToolkitWrapper.is_available()):
return False
return True
def compute_partial_charges(self, molecule, method='am1bcc'):
"""
It computes the partial charges using antechamber.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
method : str
The name of the method to use. One of ['gasteiger', 'am1bcc'].
If None, 'am1bcc' will be used
Returns
-------
charges : simtk.unit.Quantity
The array of partial charges
Raises
------
ChargeMethodUnavailableError if the requested charge method can not
be handled by this toolkit
ChargeCalculationError if the charge method is supported by this
toolkit, but fails
"""
SUPPORTED_CHARGE_METHODS = {'am1bcc': {'antechamber_keyword': 'bcc'},
'gasteiger': {'antechamber_keyword': 'gas'}
}
if method not in SUPPORTED_CHARGE_METHODS:
raise ChargeMethodUnavailableError(
'partial_charge_method '
+ '{} is not available from '.format(method)
+ 'AmberToolsToolkitWrapper. Available charge methods are '
+ list(SUPPORTED_CHARGE_METHODS.keys()))
off_molecule = molecule.off_molecule
with tempfile.TemporaryDirectory() as tmpdir:
with temporary_cd(tmpdir):
net_charge = off_molecule.total_charge / \
unit.elementary_charge
self._rdkit_toolkit_wrapper.to_sdf_file(
molecule, tmpdir + '/molecule.sdf')
subprocess.check_output([
"antechamber", "-i", "molecule.sdf", "-fi", "sdf",
"-o", "charged.ac", "-fo", "ac", "-pf", "yes", "-dr", "n",
"-c",
SUPPORTED_CHARGE_METHODS[method]['antechamber_keyword'],
"-nc", str(net_charge)])
# Write out just charges
subprocess.check_output([
"antechamber", "-dr", "n", "-i", "charged.ac", "-fi", "ac",
"-o", "charged2.ac", "-fo", "ac", "-c", "wc",
"-cf", "charges.txt", "-pf", "yes"])
if not os.path.exists('charges.txt'):
# TODO: copy files into local directory to aid debugging?
raise ChargeCalculationError(
"Antechamber/sqm partial charge calculation failed on "
"molecule {} (SMILES {})".format(
off_molecule.name, off_molecule.to_smiles()))
# Read the charges
with open('charges.txt', 'r') as infile:
contents = infile.read()
text_charges = contents.split()
charges = np.zeros([off_molecule.n_atoms], np.float64)
for index, token in enumerate(text_charges):
charges[index] = float(token)
charges = unit.Quantity(charges, unit.elementary_charge)
assert len(charges) == len(molecule.rdkit_molecule.GetAtoms()), \
'Partial charge computation failed as the length of ' \
+ 'resulting partial charges does not match with the ' \
+ 'number of atoms in molecule'
return charges
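# A minimal usage sketch for the Amber wrapper (assumes antechamber is on the
# PATH and that `mol` is an offpele Molecule built elsewhere, e.g. from a PDB):
#   amber = AmberToolkitWrapper()
#   charges = amber.compute_partial_charges(mol, method='am1bcc')
#   # `charges` is a simtk.unit.Quantity array with one partial charge per atom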
class OpenForceFieldToolkitWrapper(ToolkitWrapper):
"""
OpenForceFieldToolkitWrapper class.
"""
_toolkit_name = 'OpenForceField Toolkit'
def __init__(self):
"""
It initializes a OpenForceFieldToolkitWrapper object.
"""
super().__init__()
if not self.is_available():
raise ToolkitUnavailableException(
'The required toolkit {} is not '.format(self.toolkit_name)
+ 'available.')
@staticmethod
def is_available():
"""
Check whether the OpenForceField toolkit is installed
Returns
-------
is_installed : bool
True if OpenForceField is installed, False otherwise.
"""
try:
importlib.import_module('openforcefield')
return True
except ImportError:
return False
def from_rdkit(self, molecule):
"""
It initializes an OpenForceField's Molecule object from an RDKit
molecule.
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
Returns
-------
molecule : an openforcefield.topology.Molecule object
The OpenForceField's Molecule
"""
from openforcefield.topology.molecule import Molecule
rdkit_molecule = molecule.rdkit_molecule
return Molecule.from_rdkit(rdkit_molecule)
def get_forcefield(self, forcefield_name):
"""
It returns the OpenForceField's object that matches with the name
that is supplied.
Parameters
----------
forcefield_name : str
The name of the requested forcefield
Returns
-------
forcefield : an openforcefield.typing.engines.smirnoff.ForceField
object
The OpenForceField's forcefield
"""
from openforcefield.typing.engines.smirnoff import ForceField
if isinstance(forcefield_name, str):
forcefield = ForceField(forcefield_name)
else:
raise Exception('Invalid forcefield type')
return forcefield
def get_parameters_from_forcefield(self, forcefield, molecule):
"""
It returns the parameters that are obtained with the supplied
forcefield for a certain offpele's molecule.
Parameters
----------
forcefield : str or an openforcefield.typing.engines.smirnoff.ForceField
object
The forcefield from which the parameters will be obtained
molecule : an offpele.topology.Molecule
The offpele's Molecule object
Returns
-------
openforcefield_parameters : an OpenForceFieldParameters object
The OpenForceFieldParameters object
"""
from openforcefield.typing.engines.smirnoff import ForceField
from openforcefield.topology import Topology
off_molecule = molecule.off_molecule
topology = Topology.from_molecules([off_molecule])
if isinstance(forcefield, str):
forcefield = ForceField(forcefield)
elif isinstance(forcefield, ForceField):
pass
else:
raise Exception('Invalid forcefield type')
molecule_parameters_list = forcefield.label_molecules(topology)
assert len(molecule_parameters_list) == 1, 'A single molecule is ' \
'expected'
return self.OpenForceFieldParameters(molecule_parameters_list[0])
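    # Usage sketch (the .offxml name below is only an example; any SMIRNOFF
    # force field file known to the openforcefield toolkit works):
    #   wrapper = OpenForceFieldToolkitWrapper()
    #   parameters = wrapper.get_parameters_from_forcefield(
    #       'openff_unconstrained-1.2.1.offxml', molecule)
    #   sigmas = parameters.get_vdW_sigmas()  # dict keyed by atom-index tuples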
def get_parameter_handler_from_forcefield(self, parameter_handler_name,
forcefield):
"""
It returns a parameter handler from the forcefield based on its
name.
Parameters
----------
parameter_handler_name : str
The name of the parameter handler that is requested
forcefield : an openforcefield.typing.engines.smirnoff.ForceField
object
The forcefield from which the parameter handler will be obtained
Returns
-------
parameter_handler : an openforcefield.typing.engines.smirnoff.parameters.ParameterHandler
object
The ParameterHandler that was requested
"""
from openforcefield.typing.engines.smirnoff import ForceField
if isinstance(forcefield, str):
forcefield = ForceField(forcefield)
elif isinstance(forcefield, ForceField):
pass
else:
raise Exception('Invalid forcefield type')
return forcefield.get_parameter_handler(parameter_handler_name)
class OpenForceFieldParameters(dict):
"""
OpenForceFieldParameters class that inherits from dict.
"""
def __init__(self, parameters_list):
"""
It initializes an OpenForceFieldParameters object.
parameters_list : dict
A dictionary keyed by force type
"""
for key, value in parameters_list.items():
self[key] = value
def sigmas_from_rmin_halves(func):
"""
It converts rmin_half values to sigmas according to:
http://ambermd.org/Questions/vdwequation.pdf
"""
FACTOR = 0.8908987181403393 # The inverse of the sixth root of 2
def function_wrapper(x):
rmin_halves = func(x)
sigmas = dict()
for indexes, rmin_half in rmin_halves.items():
sigma = FACTOR * 2 * rmin_half
sigmas[indexes] = sigma
return sigmas
return function_wrapper
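        # Worked example (illustrative numbers): the Lennard-Jones minimum lies at
        # rmin = 2^(1/6) * sigma, hence sigma = 2 * rmin_half / 2^(1/6)
        # = 2 * 0.8908987... * rmin_half; a rmin_half of 1.9080 angstrom therefore
        # maps to a sigma of roughly 3.400 angstrom.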
def __str__(self):
"""
It returns the readable representation string of this object.
Returns
-------
output : str
The readable representation string
"""
output = ''
for force_tag, force_dict in self.items():
output += f"\n{force_tag}:\n"
for (atom_indices, parameter) in force_dict.items():
atomstr = ''
for idx in atom_indices:
atomstr += '%3s' % idx
output += " - atoms: %s parameter_id: %s smirks %s\n" % \
(atomstr, parameter.id, parameter.smirks)
return output
def _build_dict(self, parameters, attribute_name):
"""
It builds the dictionary of parameters of a specific force type.
Parameters
----------
parameters : dict[tuple, openforcefield.typing.engines.smirnoff.parameters.ParameterHandler]
The parameters of a specific force type grouped by tuples
with the atom ids that the parameters belong to
attribute_name : str
The name of the attribute that is requested
Returns
-------
value_by_index : dict[tuple, parameter_value]
The parameter values that were requested grouped by the atom
ids they belong to (arranged as a tuple)
"""
if parameters:
value_by_index = dict()
for index, parameter in parameters.items():
value_by_index[index] = getattr(parameter, attribute_name)
return value_by_index
def _build_dynamic_dicts(self, parameters, attr_core_name):
"""
It builds the dynamically the dictionaries of parameters of a
specific force type.
It works with the same idea as _build_dict(), however it can
handle multiple dictionary definitions were consecutive
parameters of the same type are found in the force type's
parameters dictionary. It works, for example, with the multiple
proper and improper definitions found in the OpenForceField API.
More information in the <ProperTorsions> and <ImproperTorsions>
sections at: https://open-forcefield-toolkit.readthedocs.io/en/0.7.0/smirnoff.html
Parameters
----------
parameters : dict[tuple, openforcefield.typing.engines.smirnoff.parameters.ParameterHandler]
The parameters of a specific force type grouped by tuples
with the atom ids that the parameters belong to
            attr_core_name : str
                The core name of the attribute that is requested
Returns
-------
value_by_index : dict[tuple, parameter_value]
The parameter values that were requested grouped by the atom
                ids they belong to (arranged as a tuple)
            """
if parameters:
parameters_by_index = defaultdict(dict)
all_attr_ids_found = set()
for index, parameter in parameters.items():
counter = int(1)
attr_name = attr_core_name + str(counter)
while(attr_name in parameter.to_dict()):
all_attr_ids_found.add(counter)
attr_value = getattr(parameter, attr_name)
parameters_by_index[index][counter] = attr_value
counter += 1
attr_name = attr_core_name + str(counter)
output_list = list()
for attr_id in sorted(all_attr_ids_found):
value_by_index = dict()
for index in parameters.keys():
if attr_id in parameters_by_index[index]:
value_by_index[index] = \
parameters_by_index[index][attr_id]
else:
value_by_index[index] = None
output_list.append(value_by_index)
return output_list
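        # Sketch of the returned structure (hypothetical values): with
        # attr_core_name='periodicity' and a single torsion on atoms (0, 1, 2, 3)
        # defining periodicity1=2 and periodicity2=3, the result is
        # [{(0, 1, 2, 3): 2}, {(0, 1, 2, 3): 3}], one dict per numbered term.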
# Van der Waals parameters
def get_vdW_parameters(self):
"""
It returns the parameters that belong to the van der Waals force
type.
Returns
-------
vdW_parameters : dict[tuple, openforcefield.typing.engines.smirnoff.parameters.ParameterHandler]
The parameters grouped by the atom ids they belong to
(arranged as tuples)
"""
if 'vdW' in self:
return self['vdW']
def get_vdW_sigmas(self):
"""
It gets the sigma values of the parameterized molecule.
Returns
-------
sigmas : dict[tuple[int], simtk.unit.Quantity]
The dictionary of sigma values grouped by the atom ids
they belong to (arranged as tuples)
"""
parameters = self.get_vdW_parameters()
return self._build_dict(parameters, 'sigma')
def get_vdW_epsilons(self):
"""
It gets the epsilon values of the parameterized molecule.
Returns
-------
epsilons : dict[tuple[int], simtk.unit.Quantity]
The dictionary of epsilon values grouped by the atom ids
they belong to (arranged as tuples)
"""
parameters = self.get_vdW_parameters()
return self._build_dict(parameters, 'epsilon')
def get_vdW_rmin_halves(self):
"""
It gets the rmin half values of the parameterized molecule.
Returns
-------
rmin_halves : dict[tuple[int], simtk.unit.Quantity]
The dictionary of rmin half values grouped by the atom ids
they belong to (arranged as tuples)
"""
parameters = self.get_vdW_parameters()
return self._build_dict(parameters, 'rmin_half')
@sigmas_from_rmin_halves
def get_vdW_sigmas_from_rmin_halves(self):
"""
It gets the rmin half values of the parameterized molecule.
Then, a decorator converts them into sigmas.
Returns
-------
sigmas : dict[tuple[int], simtk.unit.Quantity]
The dictionary of sigma values grouped by the atom ids
they belong to (arranged as tuples)
"""
return self.get_vdW_rmin_halves()
# Bond parameters
def get_bond_parameters(self):
"""
It returns the parameters that belong to the bonding force type.
Returns
-------
bond_parameters : dict[tuple, openforcefield.typing.engines.smirnoff.parameters.ParameterHandler]
The parameters grouped by the atom ids they belong to
(arranged as tuples)
"""
if 'Bonds' in self:
return self['Bonds']
def get_bond_lengths(self):
"""
It gets the bond length values of the parameterized molecule.
Returns
-------
bond_lengths : dict[tuple[int], simtk.unit.Quantity]
The dictionary of bond length values grouped by the atom ids
they belong to (arranged as tuples)
"""
parameters = self.get_bond_parameters()
return self._build_dict(parameters, 'length')
def get_bond_ks(self):
"""
It gets the bond k values of the parameterized molecule.
Returns
-------
bond_ks : dict[tuple[int], simtk.unit.Quantity]
The dictionary of bond k values grouped by the atom ids
they belong to (arranged as tuples)
"""
parameters = self.get_bond_parameters()
return self._build_dict(parameters, 'k')
# Angle parameters
def get_angle_parameters(self):
"""
It returns the parameters that belong to the angular force type.
Returns
-------
angle_parameters : dict[tuple, openforcefield.typing.engines.smirnoff.parameters.ParameterHandler]
The parameters grouped by the atom ids they belong to
(arranged as tuples)
"""
if 'Angles' in self:
return self['Angles']
def get_angle_angles(self):
"""
It gets the angle values of the parameterized molecule.
Returns
-------
angles : dict[tuple[int], simtk.unit.Quantity]
The dictionary of angle values grouped by the atom ids
they belong to (arranged as tuples)
"""
parameters = self.get_angle_parameters()
return self._build_dict(parameters, 'angle')
def get_angle_ks(self):
"""
It gets the angle k values of the parameterized molecule.
Returns
-------
angle_ks : dict[tuple[int], simtk.unit.Quantity]
The dictionary of angle k values grouped by the atom ids
they belong to (arranged as tuples)
"""
parameters = self.get_angle_parameters()
return self._build_dict(parameters, 'k')
# Dihedral parameters
def get_dihedral_parameters(self):
"""
It returns the parameters that belong to the proper dihedrals
force type.
Returns
-------
proper_parameters : dict[tuple, openforcefield.typing.engines.smirnoff.parameters.ParameterHandler]
The parameters grouped by the atom ids they belong to
(arranged as tuples)
"""
if 'ProperTorsions' in self:
return self['ProperTorsions']
def get_dihedral_periodicities(self):
"""
It gets the dihedral periodicity values of the parameterized
molecule.
Returns
-------
dihedral_periodicities : list[dict[tuple[int], simtk.unit.Quantity]]
The list of dictionaries of dihedral periodicity values
grouped by the atom ids they belong to (arranged as tuples)
"""
parameters = self.get_dihedral_parameters()
return self._build_dynamic_dicts(parameters, 'periodicity')
def get_dihedral_phases(self):
"""
It gets the dihedral phase values of the parameterized
molecule.
Returns
-------
dihedral_phases : list[dict[tuple[int], simtk.unit.Quantity]]
The list of dictionaries of dihedral phase values grouped
by the atom ids they belong to (arranged as tuples)
"""
parameters = self.get_dihedral_parameters()
return self._build_dynamic_dicts(parameters, 'phase')
def get_dihedral_ks(self):
"""
            It gets the dihedral k values of the parameterized
            molecule.
            Returns
            -------
            dihedral_ks : list[dict[tuple[int], simtk.unit.Quantity]]
                The list of dictionaries of dihedral k values
                grouped by the atom ids they belong to (arranged as tuples)
"""
parameters = self.get_dihedral_parameters()
return self._build_dynamic_dicts(parameters, 'k')
def get_dihedral_idivfs(self):
"""
It gets the dihedral idivf values of the parameterized
molecule.
Returns
-------
dihedral_idivfs : list[dict[tuple[int], simtk.unit.Quantity]]
The list of dictionaries of dihedral idivf values
grouped by the atom ids they belong to (arranged as tuples)
"""
parameters = self.get_dihedral_parameters()
return self._build_dynamic_dicts(parameters, 'idivf')
# Improper parameters
def get_improper_parameters(self):
"""
It returns the parameters that belong to the improper dihedrals
force type.
Returns
-------
improper_parameters : dict[tuple, openforcefield.typing.engines.smirnoff.parameters.ParameterHandler]
The parameters grouped by the atom ids they belong to
(arranged as tuples)
"""
if 'ImproperTorsions' in self:
return self['ImproperTorsions']
def get_improper_periodicities(self):
"""
It gets the improper periodicity values of the parameterized
molecule.
Returns
-------
improper_periodicities : list[dict[tuple[int], simtk.unit.Quantity]]
The list of dictionaries of improper periodicity values
grouped by the atom ids they belong to (arranged as tuples)
"""
parameters = self.get_improper_parameters()
return self._build_dynamic_dicts(parameters, 'periodicity')
def get_improper_phases(self):
"""
It gets the improper phase values of the parameterized
molecule.
Returns
-------
improper_phases : list[dict[tuple[int], simtk.unit.Quantity]]
The list of dictionaries of improper phase values
grouped by the atom ids they belong to (arranged as tuples)
"""
parameters = self.get_improper_parameters()
return self._build_dynamic_dicts(parameters, 'phase')
def get_improper_ks(self):
"""
It gets the improper k values of the parameterized
molecule.
Returns
-------
improper_ks : list[dict[tuple[int], simtk.unit.Quantity]]
The list of dictionaries of improper k values
grouped by the atom ids they belong to (arranged as tuples)
"""
parameters = self.get_improper_parameters()
return self._build_dynamic_dicts(parameters, 'k')
def get_improper_idivfs(self):
"""
It gets the improper idivf values of the parameterized
molecule.
Returns
-------
improper_idivfs : list[dict[tuple[int], simtk.unit.Quantity]]
The list of dictionaries of improper idivf values
grouped by the atom ids they belong to (arranged as tuples)
"""
parameters = self.get_improper_parameters()
return self._build_dynamic_dicts(parameters, 'idivf')
# GBSA solvent parameters
def get_GBSA_parameters(self):
"""
It returns the parameters that belong to the GBSA force type.
Returns
-------
GSBA_parameters : dict[tuple, openforcefield.typing.engines.smirnoff.parameters.ParameterHandler]
The parameters grouped by the atom ids they belong to
(arranged as tuples)
"""
if 'GBSA' in self:
return self['GBSA']
def get_GBSA_radii(self):
"""
It gets the GBSA radius values of the parameterized molecule.
Returns
-------
GBSA_radii : dict[tuple[int], simtk.unit.Quantity]
The dictionary of GBSA radius values grouped by the atom ids
they belong to (arranged as tuples)
"""
parameters = self.get_GBSA_parameters()
return self._build_dict(parameters, 'radius')
def get_GBSA_scales(self):
"""
It gets the GBSA scale values of the parameterized molecule.
Returns
-------
GBSA_scales : dict[tuple[int], simtk.unit.Quantity]
The dictionary of GBSA scale values grouped by the atom ids
they belong to (arranged as tuples)
"""
parameters = self.get_GBSA_parameters()
return self._build_dict(parameters, 'scale')
class SchrodingerToolkitWrapper(ToolkitWrapper):
"""
SchrodingerToolkitWrapper class.
"""
_toolkit_name = 'Schrodinger Toolkit'
def __init__(self):
"""
It initializes a SchrodingerToolkitWrapper object.
"""
super().__init__()
if "SCHRODINGER" not in os.environ:
import logging
logging.warning("Schrodinger Toolkit requires the environment "
+ "variable SCHRODINGER to be previously set, "
+ "pointing to the Schrodinger's installation "
+ "path. For more information, please, refer to "
+ "https://martimunicoy.github.io/offpele/installation.html#external-dependencies",
)
if not self.is_available():
raise ToolkitUnavailableException(
'The required toolkit {} is not '.format(self.toolkit_name)
+ 'available.')
self._rdkit_toolkit_wrapper = RDKitToolkitWrapper()
@staticmethod
def is_available():
"""
        Check whether Schrodinger's ffld_server is available
Returns
-------
is_installed : bool
            True if Schrodinger's ffld_server is found, False otherwise.
"""
if not(RDKitToolkitWrapper.is_available()):
return False
if SchrodingerToolkitWrapper.path_to_ffld_server() is None:
return False
return True
@staticmethod
def path_to_ffld_server():
FFLD_SERVER_PATH = find_executable("ffld_server")
if FFLD_SERVER_PATH is not None:
return FFLD_SERVER_PATH
else:
if "SCHRODINGER" in os.environ:
schrodinger_root = os.environ.get('SCHRODINGER')
return os.path.join(schrodinger_root,
'utilities', 'ffld_server')
return None
def get_OPLS_parameters(self, molecule):
"""
It calls Schrodinger's ffld_server to parameterize a molecule
with OPLS.
.. todo ::
* Review PlopRotTemp's atom type fixes. Should we apply them here?
Parameters
----------
molecule : an offpele.topology.Molecule
The offpele's Molecule object
Returns
-------
OPLS_params : an OPLSParameters object
The set of lists of parameters grouped by parameter type.
Thus, the dictionary has the following keys: atom_names,
atom_types, charges, sigmas, epsilons, SGB_radii, vdW_radii,
gammas, and alphas
"""
ffld_server_exec = self.path_to_ffld_server()
with tempfile.TemporaryDirectory() as tmpdir:
with temporary_cd(tmpdir):
self._rdkit_toolkit_wrapper.to_pdb_file(
molecule, tmpdir + '/molecule.pdb')
subprocess.check_output([ffld_server_exec,
"-ipdb", "molecule.pdb",
"-version", "14",
"-print_parameters",
"-out_file", "parameters.txt"])
OPLS_params = self._parse_parameters('parameters.txt')
self._add_solvent_parameters(OPLS_params)
return OPLS_params
def _parse_parameters(self, path_to_parameters):
"""
It parses the parameters from ffld_server's output file.
Parameters
----------
path_to_parameters : str
The path to the ffld_server's output file
Returns
-------
params : an OPLSParameters object
The set of lists of parameters grouped by parameter type.
Thus, the dictionary has the following keys: atom_names,
atom_types, charges, sigmas, and epsilons.
"""
params = defaultdict(list)
with open(path_to_parameters) as f:
section = 'out'
name_to_index = dict() # To pair atom names and indexes
for line in f:
if line.startswith('OPLSAA FORCE FIELD TYPE ASSIGNED'):
section = 'atoms'
# Skip the next 3 lines
f.readline()
f.readline()
f.readline()
elif line.startswith(' Stretch'):
section = 'bonds'
elif line.startswith(' Bending'):
section = 'angles'
elif line.startswith(' proper Torsion'):
section = 'propers'
elif line.startswith(' improper Torsion'):
section = 'impropers'
elif line == '\n':
continue
elif section == 'atoms':
if line.startswith('-'):
continue
fields = line.split()
assert len(fields) > 7, 'Unexpected number of fields ' \
+ 'found at line {}'.format(line)
name_to_index[line[0:4]] = len(params['atom_names'])
params['atom_names'].append(line[0:4])
params['atom_types'].append(fields[3])
params['charges'].append(
unit.Quantity(float(fields[4]),
unit.elementary_charge))
params['sigmas'].append(
unit.Quantity(float(fields[5]),
unit.angstrom))
params['epsilons'].append(
unit.Quantity(float(fields[6]),
unit.kilocalorie / unit.mole))
elif section == 'bonds':
fields = line.split()
assert len(fields) > 4, 'Unexpected number of fields ' \
+ 'found at line {}'.format(line)
params['bonds'].append(
{'atom1_idx': name_to_index[line[0:4]],
'atom2_idx': name_to_index[line[8:12]],
'spring_constant': unit.Quantity(
float(fields[2]), unit.kilocalorie
/ (unit.angstrom ** 2 * unit.mole)),
'eq_dist': unit.Quantity(float(fields[3]),
unit.angstrom)
})
elif section == 'angles':
fields = line.split()
assert len(fields) > 5, 'Unexpected number of fields ' \
+ 'found at line {}'.format(line)
params['angles'].append(
{'atom1_idx': name_to_index[line[0:4]],
'atom2_idx': name_to_index[line[8:12]],
'atom3_idx': name_to_index[line[16:20]],
'spring_constant': unit.Quantity(
float(fields[3]), unit.kilocalorie
/ (unit.radian ** 2 * unit.mole)),
'eq_angle': unit.Quantity(float(fields[4]),
unit.degrees)
})
return self.OPLSParameters(params)
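    # Note on the expected report layout (inferred from the parsing above): the
    # atom table follows the 'OPLSAA FORCE FIELD TYPE ASSIGNED' banner with name,
    # type, charge, sigma and epsilon columns, and the ' Stretch' / ' Bending'
    # headers introduce the bond and angle sections; only these are consumed here.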
def _add_solvent_parameters(self, OPLS_params):
"""
It add the solvent parameters to the OPLS parameters collection.
Parameters
----------
OPLS_params : an OPLSParameters object
The set of lists of parameters grouped by parameter type.
Thus, the dictionary has the following keys: atom_names,
atom_types, charges, sigmas, and epsilons. The following
solvent parameters will be added to the collection: SGB_radii,
vdW_radii, gammas, alphas
"""
solvent_data = dict()
parameters_path = get_data_file_path('parameters/f14_sgbnp.param')
with open(parameters_path) as f:
for line in f:
if line.startswith('#'):
continue
fields = line.split()
assert len(fields) > 7, 'Unexpected line with less than ' \
'8 fields at {}'.format(line)
atom_type = fields[1]
solvent_data[atom_type] = {
'SGB_radii': unit.Quantity(float(fields[4]),
unit.angstrom),
'vdW_radii': unit.Quantity(float(fields[5]),
unit.angstrom),
'gammas': float(fields[6]),
'alphas': float(fields[7])}
parameters_to_add = defaultdict(list)
tried = list()
for atom_type in OPLS_params['atom_types']:
parameters_found = False
while(not parameters_found):
if atom_type in solvent_data:
for label, value in solvent_data[atom_type].items():
parameters_to_add[label].append(value)
parameters_found = True
else:
new_atom_type = self._find_similar_atom_types(atom_type,
tried)
if new_atom_type is None:
atom_type = 'DF' # Set it to default
else:
tried.append(new_atom_type)
atom_type = new_atom_type
for label, params in parameters_to_add.items():
OPLS_params.add_parameters(label, params)
def _find_similar_atom_types(self, atom_type, tried):
"""
It tries to find a similar atom type, skipping the ones that have
already been tried. It uses the definitions from the
similarity.param file.
Parameters
----------
atom_type : str
The atom type from which similar atom types will be searched
tried : list[str]
The list of atom types that have already been tried and
will be skipped
Returns
-------
new_atom_type : str
The most similar atom type that has been found, if any.
Otherwise, it returns None
"""
new_atom_type = None
best_similarity = 0
similarity_path = get_data_file_path('parameters/similarity.param')
with open(similarity_path) as f:
for line in f:
fields = line.split()
assert len(fields) > 2, 'Unexpected number of fields at ' \
+ 'line {}'.format(line)
atom_type1, atom_type2, similarity = fields[0:3]
if (atom_type == atom_type1
and float(similarity) > best_similarity
and atom_type2 not in tried):
best_similarity = float(similarity)
new_atom_type = atom_type2
elif (atom_type == atom_type2
and float(similarity) > best_similarity
and atom_type1 not in tried):
best_similarity = float(similarity)
new_atom_type = atom_type1
return new_atom_type
class OPLSParameters(dict):
"""
OPLSParameters class that inherits from dict.
"""
def __init__(self, parameters):
"""
It initializes an OPLSParameters object.
parameters : dict
A dictionary keyed by parameter type
"""
for key, value in parameters.items():
self[key] = value
def add_parameters(self, label, parameters):
"""
It adds a list of parameters of the same type to the collection.
Parameters
----------
label : str
The label to describe the parameter type
parameters : list
The list of parameters to include to the collection
"""
self[label] = parameters
|
the-stack_106_31696 | """
One of the really important features of |jedi| is to have an option to
understand code like this::
def foo(bar):
bar. # completion here
foo(1)
There's no doubt whether bar is an ``int`` or not, but if there's also a call
like ``foo('str')``, what would happen? Well, we'll just show both. Because
that's what a human would expect.
It works as follows:
- |Jedi| sees a param
- search for function calls named ``foo``
- execute these calls and check the input. This work with a ``ParamListener``.
"""
from itertools import chain
from jedi._compatibility import unicode
from jedi.parser import tree
from jedi import settings
from jedi import debug
from jedi.evaluate.cache import memoize_default
from jedi.evaluate import imports
class ParamListener(object):
"""
This listener is used to get the params for a function.
"""
def __init__(self):
self.param_possibilities = []
def execute(self, params):
self.param_possibilities += params
@debug.increase_indent
def search_params(evaluator, param):
"""
A dynamic search for param values. If you try to complete a type:
>>> def func(foo):
... foo
>>> func(1)
>>> func("")
    It is not known what type ``foo`` is without analysing the whole code. You
have to look for all calls to ``func`` to find out what ``foo`` possibly
is.
"""
if not settings.dynamic_params:
return []
func = param.get_parent_until(tree.Function)
debug.dbg('Dynamic param search for %s in %s.', param, str(func.name))
# Compare the param names.
names = [n for n in search_function_call(evaluator, func)
if n.value == param.name.value]
# Evaluate the ExecutedParams to types.
result = list(chain.from_iterable(n.parent.eval(evaluator) for n in names))
debug.dbg('Dynamic param result %s', result)
return result
@memoize_default([], evaluator_is_first_arg=True)
def search_function_call(evaluator, func):
"""
Returns a list of param names.
"""
from jedi.evaluate import representation as er
def get_params_for_module(module):
"""
Returns the values of a param, or an empty array.
"""
@memoize_default([], evaluator_is_first_arg=True)
def get_posibilities(evaluator, module, func_name):
try:
names = module.used_names[func_name]
except KeyError:
return []
for name in names:
parent = name.parent
if tree.is_node(parent, 'trailer'):
parent = parent.parent
trailer = None
if tree.is_node(parent, 'power'):
for t in parent.children[1:]:
if t == '**':
break
if t.start_pos > name.start_pos and t.children[0] == '(':
trailer = t
break
if trailer is not None:
types = evaluator.goto_definition(name)
# We have to remove decorators, because they are not the
# "original" functions, this way we can easily compare.
# At the same time we also have to remove InstanceElements.
undec = []
for escope in types:
if escope.isinstance(er.Function, er.Instance) \
and escope.decorates is not None:
undec.append(escope.decorates)
elif isinstance(escope, er.InstanceElement):
undec.append(escope.var)
else:
undec.append(escope)
if evaluator.wrap(compare) in undec:
# Only if we have the correct function we execute
# it, otherwise just ignore it.
evaluator.eval_trailer(types, trailer)
return listener.param_possibilities
return get_posibilities(evaluator, module, func_name)
current_module = func.get_parent_until()
func_name = unicode(func.name)
compare = func
if func_name == '__init__':
cls = func.get_parent_scope()
if isinstance(cls, tree.Class):
func_name = unicode(cls.name)
compare = cls
# add the listener
listener = ParamListener()
func.listeners.add(listener)
try:
result = []
# This is like backtracking: Get the first possible result.
for mod in imports.get_modules_containing_name(evaluator, [current_module], func_name):
result = get_params_for_module(mod)
if result:
break
finally:
# cleanup: remove the listener; important: should not stick.
func.listeners.remove(listener)
return result
|
the-stack_106_31698 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a format decision state object that manages whitespace decisions.
Each token is processed one at a time, at which point its whitespace formatting
decisions are made. A graph of potential whitespace formattings is created,
where each node in the graph is a format decision state object. The heuristic
tries formatting the token with and without a newline before it to determine
which one has the least penalty. Therefore, the format decision state object for
each decision needs to be its own unique copy.
Once the heuristic determines the best formatting, it makes a non-dry run pass
through the code to commit the whitespace formatting.
FormatDecisionState: main class exported by this module.
"""
from yapf.yapflib import format_token
from yapf.yapflib import object_state
from yapf.yapflib import split_penalty
from yapf.yapflib import style
from yapf.yapflib import unwrapped_line
class FormatDecisionState(object):
"""The current state when indenting an unwrapped line.
The FormatDecisionState object is meant to be copied instead of referenced.
Attributes:
first_indent: The indent of the first token.
column: The number of used columns in the current line.
next_token: The next token to be formatted.
paren_level: The level of nesting inside (), [], and {}.
lowest_level_on_line: The lowest paren_level on the current line.
stack: A stack (of _ParenState) keeping track of properties applying to
parenthesis levels.
comp_stack: A stack (of ComprehensionState) keeping track of properties
applying to comprehensions.
param_list_stack: A stack (of ParameterListState) keeping track of
properties applying to function parameter lists.
ignore_stack_for_comparison: Ignore the stack of _ParenState for state
comparison.
"""
def __init__(self, line, first_indent):
"""Initializer.
Initializes to the state after placing the first token from 'line' at
'first_indent'.
Arguments:
line: (UnwrappedLine) The unwrapped line we're currently processing.
first_indent: (int) The indent of the first token.
"""
self.next_token = line.first
self.column = first_indent
self.line = line
self.paren_level = 0
self.lowest_level_on_line = 0
self.ignore_stack_for_comparison = False
self.stack = [_ParenState(first_indent, first_indent)]
self.comp_stack = []
self.param_list_stack = []
self.first_indent = first_indent
self.column_limit = style.Get('COLUMN_LIMIT')
def Clone(self):
"""Clones a FormatDecisionState object."""
new = FormatDecisionState(self.line, self.first_indent)
new.next_token = self.next_token
new.column = self.column
new.line = self.line
new.paren_level = self.paren_level
new.line.depth = self.line.depth
new.lowest_level_on_line = self.lowest_level_on_line
new.ignore_stack_for_comparison = self.ignore_stack_for_comparison
new.first_indent = self.first_indent
new.stack = [state.Clone() for state in self.stack]
new.comp_stack = [state.Clone() for state in self.comp_stack]
new.param_list_stack = [state.Clone() for state in self.param_list_stack]
return new
def __eq__(self, other):
# Note: 'first_indent' is implicit in the stack. Also, we ignore 'previous',
# because it shouldn't have a bearing on this comparison. (I.e., it will
# report equal if 'next_token' does.)
return (self.next_token == other.next_token and
self.column == other.column and
self.paren_level == other.paren_level and
self.line.depth == other.line.depth and
self.lowest_level_on_line == other.lowest_level_on_line and
(self.ignore_stack_for_comparison or
other.ignore_stack_for_comparison or self.stack == other.stack and
self.comp_stack == other.comp_stack and
self.param_list_stack == other.param_list_stack))
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.next_token, self.column, self.paren_level,
self.line.depth, self.lowest_level_on_line))
def __repr__(self):
return ('column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' %
(self.column, repr(self.next_token), self.paren_level,
'\n\t'.join(repr(s) for s in self.stack) + ']'))
def CanSplit(self, must_split):
"""Determine if we can split before the next token.
Arguments:
must_split: (bool) A newline was required before this token.
Returns:
True if the line can be split before the next token.
"""
current = self.next_token
previous = current.previous_token
if current.is_pseudo_paren:
return False
if (not must_split and
format_token.Subtype.DICTIONARY_KEY_PART in current.subtypes and
format_token.Subtype.DICTIONARY_KEY not in current.subtypes and
not style.Get('ALLOW_MULTILINE_DICTIONARY_KEYS')):
# In some situations, a dictionary may be multiline, but pylint doesn't
# like it. So don't allow it unless forced to.
return False
if (not must_split and
format_token.Subtype.DICTIONARY_VALUE in current.subtypes and
not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')):
return False
if previous and previous.value == '(' and current.value == ')':
# Don't split an empty function call list if we aren't splitting before
# dict values.
token = previous.previous_token
while token:
prev = token.previous_token
if not prev or prev.name not in {'NAME', 'DOT'}:
break
token = token.previous_token
if token and format_token.Subtype.DICTIONARY_VALUE in token.subtypes:
if not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE'):
return False
if previous and previous.value == '.' and current.value == '.':
return False
return current.can_break_before
def MustSplit(self):
"""Returns True if the line must split before the next token."""
current = self.next_token
previous = current.previous_token
if current.is_pseudo_paren:
return False
if current.must_break_before:
return True
if not previous:
return False
if style.Get('SPLIT_ALL_COMMA_SEPARATED_VALUES') and previous.value == ',':
return True
if (style.Get('SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES') and
previous.value == ','):
# Avoid breaking in a container that fits in the current line if possible
opening = _GetOpeningBracket(current)
# Can't find opening bracket, behave the same way as
# SPLIT_ALL_COMMA_SEPARATED_VALUES
if not opening:
return True
# If the container doesn't fit in the current line, must split
return not self._ContainerFitsOnStartLine(opening)
if (self.stack[-1].split_before_closing_bracket and
(current.value in '}]' and style.Get('SPLIT_BEFORE_CLOSING_BRACKET') or
current.value in '}])' and style.Get('INDENT_CLOSING_BRACKETS'))):
# Split before the closing bracket if we can.
if format_token.Subtype.SUBSCRIPT_BRACKET not in current.subtypes:
return current.node_split_penalty != split_penalty.UNBREAKABLE
if (current.value == ')' and previous.value == ',' and
not _IsSingleElementTuple(current.matching_bracket)):
return True
# Prevent splitting before the first argument in compound statements
# with the exception of function declarations.
if (style.Get('SPLIT_BEFORE_FIRST_ARGUMENT') and
_IsCompoundStatement(self.line.first) and
not _IsFunctionDef(self.line.first)):
return False
###########################################################################
# List Splitting
if (style.Get('DEDENT_CLOSING_BRACKETS') or
style.Get('INDENT_CLOSING_BRACKETS') or
style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')):
bracket = current if current.ClosesScope() else previous
if format_token.Subtype.SUBSCRIPT_BRACKET not in bracket.subtypes:
if bracket.OpensScope():
if style.Get('COALESCE_BRACKETS'):
if current.OpensScope():
# Prefer to keep all opening brackets together.
return False
if (not _IsLastScopeInLine(bracket) or
unwrapped_line.IsSurroundedByBrackets(bracket)):
last_token = bracket.matching_bracket
else:
last_token = _LastTokenInLine(bracket.matching_bracket)
if not self._FitsOnLine(bracket, last_token):
# Split before the first element if the whole list can't fit on a
# single line.
self.stack[-1].split_before_closing_bracket = True
return True
elif (style.Get('DEDENT_CLOSING_BRACKETS') or
style.Get('INDENT_CLOSING_BRACKETS')) and current.ClosesScope():
# Split before and dedent the closing bracket.
return self.stack[-1].split_before_closing_bracket
if (style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') and
current.is_name):
# An expression that's surrounded by parens gets split after the opening
# parenthesis.
def SurroundedByParens(token):
"""Check if it's an expression surrounded by parentheses."""
while token:
if token.value == ',':
return False
if token.value == ')':
return not token.next_token
if token.OpensScope():
token = token.matching_bracket.next_token
else:
token = token.next_token
return False
if (previous.value == '(' and not previous.is_pseudo_paren and
not unwrapped_line.IsSurroundedByBrackets(previous)):
pptoken = previous.previous_token
if (pptoken and not pptoken.is_name and not pptoken.is_keyword and
SurroundedByParens(current)):
return True
if (current.is_name or current.is_string) and previous.value == ',':
# If the list has function calls in it and the full list itself cannot
# fit on the line, then we want to split. Otherwise, we'll get something
# like this:
#
# X = [
# Bar(xxx='some string',
# yyy='another long string',
# zzz='a third long string'), Bar(
# xxx='some string',
# yyy='another long string',
# zzz='a third long string')
# ]
#
      # or when using string-formatting syntax.
func_call_or_string_format = False
tok = current.next_token
if current.is_name:
while tok and (tok.is_name or tok.value == '.'):
tok = tok.next_token
func_call_or_string_format = tok and tok.value == '('
elif current.is_string:
while tok and tok.is_string:
tok = tok.next_token
func_call_or_string_format = tok and tok.value == '%'
if func_call_or_string_format:
open_bracket = unwrapped_line.IsSurroundedByBrackets(current)
if open_bracket:
if open_bracket.value in '[{':
if not self._FitsOnLine(open_bracket,
open_bracket.matching_bracket):
return True
elif tok.value == '(':
if not self._FitsOnLine(current, tok.matching_bracket):
return True
if (current.OpensScope() and previous.value == ',' and
format_token.Subtype.DICTIONARY_KEY not in current.next_token.subtypes):
# If we have a list of tuples, then we can get a similar look as above. If
# the full list cannot fit on the line, then we want a split.
open_bracket = unwrapped_line.IsSurroundedByBrackets(current)
if (open_bracket and open_bracket.value in '[{' and
format_token.Subtype.SUBSCRIPT_BRACKET not in open_bracket.subtypes):
if not self._FitsOnLine(current, current.matching_bracket):
return True
###########################################################################
# Dict/Set Splitting
if (style.Get('EACH_DICT_ENTRY_ON_SEPARATE_LINE') and
format_token.Subtype.DICTIONARY_KEY in current.subtypes and
not current.is_comment):
# Place each dictionary entry onto its own line.
if previous.value == '{' and previous.previous_token:
opening = _GetOpeningBracket(previous.previous_token)
if (opening and opening.value == '(' and opening.previous_token and
opening.previous_token.is_name):
# This is a dictionary that's an argument to a function.
if (self._FitsOnLine(previous, previous.matching_bracket) and
previous.matching_bracket.next_token and
(not opening.matching_bracket.next_token or
opening.matching_bracket.next_token.value != '.') and
_ScopeHasNoCommas(previous)):
# Don't split before the key if:
# - The dictionary fits on a line, and
# - The function call isn't part of a builder-style call and
# - The dictionary has one entry and no trailing comma
return False
return True
if (style.Get('SPLIT_BEFORE_DICT_SET_GENERATOR') and
format_token.Subtype.DICT_SET_GENERATOR in current.subtypes):
# Split before a dict/set generator.
return True
if (format_token.Subtype.DICTIONARY_VALUE in current.subtypes or
(previous.is_pseudo_paren and previous.value == '(' and
not current.is_comment)):
# Split before the dictionary value if we can't fit every dictionary
# entry on its own line.
if not current.OpensScope():
opening = _GetOpeningBracket(current)
if not self._EachDictEntryFitsOnOneLine(opening):
return style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')
if previous.value == '{':
# Split if the dict/set cannot fit on one line and ends in a comma.
closing = previous.matching_bracket
if (not self._FitsOnLine(previous, closing) and
closing.previous_token.value == ','):
self.stack[-1].split_before_closing_bracket = True
return True
###########################################################################
# Argument List Splitting
if (style.Get('SPLIT_BEFORE_NAMED_ASSIGNS') and not current.is_comment and
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in
current.subtypes):
if (previous.value not in {'=', ':', '*', '**'} and
current.value not in ':=,)' and not _IsFunctionDefinition(previous)):
# If we're going to split the lines because of named arguments, then we
# want to split after the opening bracket as well. But not when this is
# part of a function definition.
if previous.value == '(':
# Make sure we don't split after the opening bracket if the
# continuation indent is greater than the opening bracket:
#
# a(
# b=1,
# c=2)
if (self._FitsOnLine(previous, previous.matching_bracket) and
unwrapped_line.IsSurroundedByBrackets(previous)):
# An argument to a function is a function call with named
# assigns.
return False
# Don't split if not required
if (not style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') and
not style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')):
return False
column = self.column - self.stack[-1].last_space
return column > style.Get('CONTINUATION_INDENT_WIDTH')
opening = _GetOpeningBracket(current)
if opening:
return not self._ContainerFitsOnStartLine(opening)
if (current.value not in '{)' and previous.value == '(' and
self._ArgumentListHasDictionaryEntry(current)):
return True
if style.Get('SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED'):
# Split before arguments in a function call or definition if the
# arguments are terminated by a comma.
opening = _GetOpeningBracket(current)
if opening and opening.previous_token and opening.previous_token.is_name:
if previous.value in '(,':
if opening.matching_bracket.previous_token.value == ',':
return True
if ((current.is_name or current.value in {'*', '**'}) and
previous.value == ','):
# If we have a function call within an argument list and it won't fit on
# the remaining line, but it will fit on a line by itself, then go ahead
# and split before the call.
opening = _GetOpeningBracket(current)
if (opening and opening.value == '(' and opening.previous_token and
(opening.previous_token.is_name or
opening.previous_token.value in {'*', '**'})):
is_func_call = False
opening = current
while opening:
if opening.value == '(':
is_func_call = True
break
if (not (opening.is_name or opening.value in {'*', '**'}) and
opening.value != '.'):
break
opening = opening.next_token
if is_func_call:
if (not self._FitsOnLine(current, opening.matching_bracket) or
(opening.matching_bracket.next_token and
opening.matching_bracket.next_token.value != ',' and
not opening.matching_bracket.next_token.ClosesScope())):
return True
pprevious = previous.previous_token
# A function call with a dictionary as its first argument may result in
# unreadable formatting if the dictionary spans multiple lines. The
    # dictionary itself is formatted just fine, but the remaining arguments are
# indented too far:
#
# function_call({
# KEY_1: 'value one',
# KEY_2: 'value two',
# },
# default=False)
if (current.value == '{' and previous.value == '(' and pprevious and
pprevious.is_name):
dict_end = current.matching_bracket
next_token = dict_end.next_token
if next_token.value == ',' and not self._FitsOnLine(current, dict_end):
return True
if (current.is_name and pprevious and pprevious.is_name and
previous.value == '('):
if (not self._FitsOnLine(previous, previous.matching_bracket) and
_IsFunctionCallWithArguments(current)):
# There is a function call, with more than 1 argument, where the first
# argument is itself a function call with arguments that does not fit
# into the line. In this specific case, if we split after the first
# argument's opening '(', then the formatting will look bad for the
# rest of the arguments. E.g.:
#
# outer_function_call(inner_function_call(
# inner_arg1, inner_arg2),
# outer_arg1, outer_arg2)
#
# Instead, enforce a split before that argument to keep things looking
# good.
if (style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') or
style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')):
return True
opening = _GetOpeningBracket(current)
if (opening and opening.value == '(' and opening.previous_token and
(opening.previous_token.is_name or
opening.previous_token.value in {'*', '**'})):
is_func_call = False
opening = current
while opening:
if opening.value == '(':
is_func_call = True
break
if (not (opening.is_name or opening.value in {'*', '**'}) and
opening.value != '.'):
break
opening = opening.next_token
if is_func_call:
if (not self._FitsOnLine(current, opening.matching_bracket) or
(opening.matching_bracket.next_token and
opening.matching_bracket.next_token.value != ',' and
not opening.matching_bracket.next_token.ClosesScope())):
return True
if (previous.OpensScope() and not current.OpensScope() and
not current.is_comment and
format_token.Subtype.SUBSCRIPT_BRACKET not in previous.subtypes):
if pprevious and not pprevious.is_keyword and not pprevious.is_name:
# We want to split if there's a comment in the container.
token = current
while token != previous.matching_bracket:
if token.is_comment:
return True
token = token.next_token
if previous.value == '(':
pptoken = previous.previous_token
if not pptoken or not pptoken.is_name:
# Split after the opening of a tuple if it doesn't fit on the current
# line and it's not a function call.
if self._FitsOnLine(previous, previous.matching_bracket):
return False
elif not self._FitsOnLine(previous, previous.matching_bracket):
if len(previous.container_elements) == 1:
return False
elements = previous.container_elements + [previous.matching_bracket]
i = 1
while i < len(elements):
if (not elements[i - 1].OpensScope() and
not self._FitsOnLine(elements[i - 1], elements[i])):
return True
i += 1
if (self.column_limit - self.column) / float(self.column_limit) < 0.3:
# Try not to squish all of the arguments off to the right.
return True
else:
# Split after the opening of a container if it doesn't fit on the
# current line.
if not self._FitsOnLine(previous, previous.matching_bracket):
return True
###########################################################################
# Original Formatting Splitting
# These checks rely upon the original formatting. This is in order to
# attempt to keep hand-written code in the same condition as it was before.
# However, this may cause the formatter to fail to be idempotent.
if (style.Get('SPLIT_BEFORE_BITWISE_OPERATOR') and current.value in '&|' and
previous.lineno < current.lineno):
# Retain the split before a bitwise operator.
return True
if (current.is_comment and
previous.lineno < current.lineno - current.value.count('\n')):
# If a comment comes in the middle of an unwrapped line (like an if
# conditional with comments interspersed), then we want to split if the
# original comments were on a separate line.
return True
return False
def AddTokenToState(self, newline, dry_run, must_split=False):
"""Add a token to the format decision state.
Allow the heuristic to try out adding the token with and without a newline.
Later on, the algorithm will determine which one has the lowest penalty.
Arguments:
newline: (bool) Add the token on a new line if True.
dry_run: (bool) Don't commit whitespace changes to the FormatToken if
True.
must_split: (bool) A newline was required before this token.
Returns:
The penalty of splitting after the current token.
"""
self._PushParameterListState(newline)
penalty = 0
if newline:
penalty = self._AddTokenOnNewline(dry_run, must_split)
else:
self._AddTokenOnCurrentLine(dry_run)
penalty += self._CalculateComprehensionState(newline)
penalty += self._CalculateParameterListState(newline)
return self.MoveStateToNextToken() + penalty
def _AddTokenOnCurrentLine(self, dry_run):
"""Puts the token on the current line.
Appends the next token to the state and updates information necessary for
indentation.
Arguments:
      dry_run: (bool) Don't commit whitespace changes to the FormatToken if
        True.
"""
current = self.next_token
previous = current.previous_token
spaces = current.spaces_required_before
if isinstance(spaces, list):
# Don't set the value here, as we need to look at the lines near
# this one to determine the actual horizontal alignment value.
spaces = 0
if not dry_run:
current.AddWhitespacePrefix(newlines_before=0, spaces=spaces)
if previous.OpensScope():
if not current.is_comment:
# Align closing scopes that are on a newline with the opening scope:
#
# foo = [a,
# b,
# ]
self.stack[-1].closing_scope_indent = self.column - 1
if style.Get('ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'):
self.stack[-1].closing_scope_indent += 1
self.stack[-1].indent = self.column + spaces
else:
self.stack[-1].closing_scope_indent = (
self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH'))
self.column += spaces
def _AddTokenOnNewline(self, dry_run, must_split):
"""Adds a line break and necessary indentation.
Appends the next token to the state and updates information necessary for
indentation.
Arguments:
dry_run: (bool) Don't commit whitespace changes to the FormatToken if
True.
must_split: (bool) A newline was required before this token.
Returns:
The split penalty for splitting after the current state.
"""
current = self.next_token
previous = current.previous_token
self.column = self._GetNewlineColumn()
if not dry_run:
indent_level = self.line.depth
spaces = self.column
if spaces:
spaces -= indent_level * style.Get('INDENT_WIDTH')
current.AddWhitespacePrefix(
newlines_before=1, spaces=spaces, indent_level=indent_level)
if not current.is_comment:
self.stack[-1].last_space = self.column
self.lowest_level_on_line = self.paren_level
if (previous.OpensScope() or
(previous.is_comment and previous.previous_token is not None and
previous.previous_token.OpensScope())):
dedent = (style.Get('CONTINUATION_INDENT_WIDTH'),
0)[style.Get('INDENT_CLOSING_BRACKETS')]
self.stack[-1].closing_scope_indent = max(0,
self.stack[-1].indent - dedent)
self.stack[-1].split_before_closing_bracket = True
# Calculate the split penalty.
penalty = current.split_penalty
if must_split:
# Don't penalize for a must split.
return penalty
if previous.is_pseudo_paren and previous.value == '(':
# Small penalty for splitting after a pseudo paren.
penalty += 50
# Add a penalty for each increasing newline we add, but don't penalize for
# splitting before an if-expression or list comprehension.
if current.value not in {'if', 'for'}:
last = self.stack[-1]
last.num_line_splits += 1
penalty += (
style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') *
last.num_line_splits)
if current.OpensScope() and previous.OpensScope():
# Prefer to keep opening brackets coalesced (unless it's at the beginning
# of a function call).
pprev = previous.previous_token
if not pprev or not pprev.is_name:
penalty += 10
return penalty + 10
def MoveStateToNextToken(self):
"""Calculate format decision state information and move onto the next token.
Before moving onto the next token, we first calculate the format decision
state given the current token and its formatting decisions. Then the format
decision state is set up so that the next token can be added.
Returns:
The penalty for the number of characters over the column limit.
"""
current = self.next_token
if not current.OpensScope() and not current.ClosesScope():
self.lowest_level_on_line = min(self.lowest_level_on_line,
self.paren_level)
# If we encounter an opening bracket, we add a level to our stack to prepare
# for the subsequent tokens.
if current.OpensScope():
last = self.stack[-1]
new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space
self.stack.append(_ParenState(new_indent, self.stack[-1].last_space))
self.paren_level += 1
# If we encounter a closing bracket, we can remove a level from our
# parenthesis stack.
if len(self.stack) > 1 and current.ClosesScope():
if format_token.Subtype.DICTIONARY_KEY_PART in current.subtypes:
self.stack[-2].last_space = self.stack[-2].indent
else:
self.stack[-2].last_space = self.stack[-1].last_space
self.stack.pop()
self.paren_level -= 1
is_multiline_string = current.is_string and '\n' in current.value
if is_multiline_string:
# This is a multiline string. Only look at the first line.
self.column += len(current.value.split('\n')[0])
elif not current.is_pseudo_paren:
self.column += len(current.value)
self.next_token = self.next_token.next_token
# Calculate the penalty for overflowing the column limit.
penalty = 0
if (not current.is_pylint_comment and not current.is_pytype_comment and
self.column > self.column_limit):
excess_characters = self.column - self.column_limit
penalty += style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters
if is_multiline_string:
# If this is a multiline string, the column is actually the
# end of the last line in the string.
self.column = len(current.value.split('\n')[-1])
return penalty
def _CalculateComprehensionState(self, newline):
"""Makes required changes to comprehension state.
Args:
newline: Whether the current token is to be added on a newline.
Returns:
The penalty for the token-newline combination given the current
comprehension state.
"""
current = self.next_token
previous = current.previous_token
top_of_stack = self.comp_stack[-1] if self.comp_stack else None
penalty = 0
if top_of_stack is not None:
# Check if the token terminates the current comprehension.
if current == top_of_stack.closing_bracket:
last = self.comp_stack.pop()
# Lightly penalize comprehensions that are split across multiple lines.
if last.has_interior_split:
penalty += style.Get('SPLIT_PENALTY_COMPREHENSION')
return penalty
if newline:
top_of_stack.has_interior_split = True
if (format_token.Subtype.COMP_EXPR in current.subtypes and
format_token.Subtype.COMP_EXPR not in previous.subtypes):
self.comp_stack.append(object_state.ComprehensionState(current))
return penalty
if (current.value == 'for' and
format_token.Subtype.COMP_FOR in current.subtypes):
if top_of_stack.for_token is not None:
# Treat nested comprehensions like normal comp_if expressions.
# Example:
# my_comp = [
# a.qux + b.qux
# for a in foo
# --> for b in bar <--
# if a.zut + b.zut
# ]
if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and
top_of_stack.has_split_at_for != newline and
(top_of_stack.has_split_at_for or
not top_of_stack.HasTrivialExpr())):
penalty += split_penalty.UNBREAKABLE
else:
top_of_stack.for_token = current
top_of_stack.has_split_at_for = newline
# Try to keep trivial expressions on the same line as the comp_for.
if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and newline and
top_of_stack.HasTrivialExpr()):
penalty += split_penalty.CONNECTED
if (format_token.Subtype.COMP_IF in current.subtypes and
format_token.Subtype.COMP_IF not in previous.subtypes):
# Penalize breaking at comp_if when it doesn't match the newline structure
# in the rest of the comprehension.
if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and
top_of_stack.has_split_at_for != newline and
(top_of_stack.has_split_at_for or not top_of_stack.HasTrivialExpr())):
penalty += split_penalty.UNBREAKABLE
return penalty
def _PushParameterListState(self, newline):
"""Push a new parameter list state for a function definition.
Args:
newline: Whether the current token is to be added on a newline.
"""
current = self.next_token
previous = current.previous_token
if _IsFunctionDefinition(previous):
first_param_column = previous.total_length + self.stack[-2].indent
self.param_list_stack.append(
object_state.ParameterListState(previous, newline,
first_param_column))
def _CalculateParameterListState(self, newline):
"""Makes required changes to parameter list state.
Args:
newline: Whether the current token is to be added on a newline.
Returns:
The penalty for the token-newline combination given the current
parameter state.
"""
current = self.next_token
previous = current.previous_token
penalty = 0
if _IsFunctionDefinition(previous):
first_param_column = previous.total_length + self.stack[-2].indent
if not newline:
param_list = self.param_list_stack[-1]
if param_list.parameters and param_list.has_typed_return:
last_param = param_list.parameters[-1].first_token
last_token = _LastTokenInLine(previous.matching_bracket)
total_length = last_token.total_length
total_length -= last_param.total_length - len(last_param.value)
if total_length + self.column > self.column_limit:
# If we need to split before the trailing code of a function
# definition with return types, then also split before the opening
# parameter so that the trailing bit isn't indented on a line by
# itself:
#
# def rrrrrrrrrrrrrrrrrrrrrr(ccccccccccccccccccccccc: Tuple[Text]
# ) -> List[Tuple[Text, Text]]:
# pass
penalty += split_penalty.VERY_STRONGLY_CONNECTED
return penalty
if first_param_column <= self.column:
# Make sure we don't split after the opening bracket if the
# continuation indent is greater than the opening bracket:
#
# a(
# b=1,
# c=2)
penalty += split_penalty.VERY_STRONGLY_CONNECTED
return penalty
if not self.param_list_stack:
return penalty
param_list = self.param_list_stack[-1]
if current == self.param_list_stack[-1].closing_bracket:
self.param_list_stack.pop() # We're done with this state.
if newline and param_list.has_typed_return:
if param_list.split_before_closing_bracket:
penalty -= split_penalty.STRONGLY_CONNECTED
elif param_list.LastParamFitsOnLine(self.column):
penalty += split_penalty.STRONGLY_CONNECTED
if (not newline and param_list.has_typed_return and
param_list.has_split_before_first_param):
# Prefer splitting before the closing bracket if there's a return type
# and we've already split before the first parameter.
penalty += split_penalty.STRONGLY_CONNECTED
return penalty
if not param_list.parameters:
return penalty
if newline:
if self._FitsOnLine(param_list.parameters[0].first_token,
_LastTokenInLine(param_list.closing_bracket)):
penalty += split_penalty.STRONGLY_CONNECTED
if (not newline and style.Get('SPLIT_BEFORE_NAMED_ASSIGNS') and
param_list.has_default_values and
current != param_list.parameters[0].first_token and
current != param_list.closing_bracket and
format_token.Subtype.PARAMETER_START in current.subtypes):
# If we want to split before parameters when there are named assigns,
# then add a penalty for not splitting.
penalty += split_penalty.STRONGLY_CONNECTED
return penalty
def _GetNewlineColumn(self):
"""Return the new column on the newline."""
current = self.next_token
previous = current.previous_token
top_of_stack = self.stack[-1]
if isinstance(current.spaces_required_before, list):
# Don't set the value here, as we need to look at the lines near
# this one to determine the actual horizontal alignment value.
return 0
elif current.spaces_required_before > 2 or self.line.disable:
return current.spaces_required_before
if current.OpensScope():
return top_of_stack.indent if self.paren_level else self.first_indent
if current.ClosesScope():
if (previous.OpensScope() or
(previous.is_comment and previous.previous_token is not None and
previous.previous_token.OpensScope())):
return max(0,
top_of_stack.indent - style.Get('CONTINUATION_INDENT_WIDTH'))
return top_of_stack.closing_scope_indent
if (previous and previous.is_string and current.is_string and
format_token.Subtype.DICTIONARY_VALUE in current.subtypes):
return previous.column
if style.Get('INDENT_DICTIONARY_VALUE'):
if previous and (previous.value == ':' or previous.is_pseudo_paren):
if format_token.Subtype.DICTIONARY_VALUE in current.subtypes:
return top_of_stack.indent
if (_IsCompoundStatement(self.line.first) and
(not (style.Get('DEDENT_CLOSING_BRACKETS') or
style.Get('INDENT_CLOSING_BRACKETS')) or
style.Get('SPLIT_BEFORE_FIRST_ARGUMENT'))):
token_indent = (
len(self.line.first.whitespace_prefix.split('\n')[-1]) +
style.Get('INDENT_WIDTH'))
if token_indent == top_of_stack.indent:
if self.param_list_stack and _IsFunctionDef(self.line.first):
last_param = self.param_list_stack[-1]
if (last_param.LastParamFitsOnLine(token_indent) and
not last_param.LastParamFitsOnLine(
token_indent + style.Get('CONTINUATION_INDENT_WIDTH'))):
self.param_list_stack[-1].split_before_closing_bracket = True
return token_indent
if not last_param.LastParamFitsOnLine(token_indent):
self.param_list_stack[-1].split_before_closing_bracket = True
return token_indent
return token_indent + style.Get('CONTINUATION_INDENT_WIDTH')
return top_of_stack.indent
def _FitsOnLine(self, start, end):
"""Determines if line between start and end can fit on the current line."""
length = end.total_length - start.total_length
if not start.is_pseudo_paren:
length += len(start.value)
return length + self.column <= self.column_limit
def _EachDictEntryFitsOnOneLine(self, opening):
"""Determine if each dict elems can fit on one line."""
def PreviousNonCommentToken(tok):
tok = tok.previous_token
while tok.is_comment:
tok = tok.previous_token
return tok
def ImplicitStringConcatenation(tok):
num_strings = 0
if tok.is_pseudo_paren:
tok = tok.next_token
while tok.is_string:
num_strings += 1
tok = tok.next_token
return num_strings > 1
def DictValueIsContainer(opening, closing):
if not opening or not closing:
return False
colon = opening.previous_token
while colon:
if not colon.is_pseudo_paren:
break
colon = colon.previous_token
if not colon or colon.value != ':':
return False
key = colon.previous_token
if not key:
return False
return format_token.Subtype.DICTIONARY_KEY_PART in key.subtypes
closing = opening.matching_bracket
entry_start = opening.next_token
current = opening.next_token.next_token
while current and current != closing:
if format_token.Subtype.DICTIONARY_KEY in current.subtypes:
prev = PreviousNonCommentToken(current)
if prev.value == ',':
prev = PreviousNonCommentToken(prev.previous_token)
if not DictValueIsContainer(prev.matching_bracket, prev):
length = prev.total_length - entry_start.total_length
length += len(entry_start.value)
if length + self.stack[-2].indent >= self.column_limit:
return False
entry_start = current
if current.OpensScope():
if ((current.value == '{' or
(current.is_pseudo_paren and current.next_token.value == '{') and
format_token.Subtype.DICTIONARY_VALUE in current.subtypes) or
ImplicitStringConcatenation(current)):
# A dictionary entry that cannot fit on a single line shouldn't matter
# to this calculation. If it can't fit on a single line, then the
# opening should be on the same line as the key and the rest on
# newlines after it. But the other entries should be on single lines
# if possible.
if current.matching_bracket:
current = current.matching_bracket
while current:
if current == closing:
return True
if format_token.Subtype.DICTIONARY_KEY in current.subtypes:
entry_start = current
break
current = current.next_token
else:
current = current.matching_bracket
else:
current = current.next_token
# At this point, current is the closing bracket. Go back one to get the end
# of the dictionary entry.
current = PreviousNonCommentToken(current)
length = current.total_length - entry_start.total_length
length += len(entry_start.value)
return length + self.stack[-2].indent <= self.column_limit
def _ArgumentListHasDictionaryEntry(self, token):
"""Check if the function argument list has a dictionary as an arg."""
if _IsArgumentToFunction(token):
while token:
if token.value == '{':
length = token.matching_bracket.total_length - token.total_length
return length + self.stack[-2].indent > self.column_limit
if token.ClosesScope():
break
if token.OpensScope():
token = token.matching_bracket
token = token.next_token
return False
def _ContainerFitsOnStartLine(self, opening):
"""Check if the container can fit on its starting line.
Arguments:
      opening: (FormatToken) The opening bracket of the container.
Returns:
True if the container fits on the start line.
"""
return (opening.matching_bracket.total_length - opening.total_length +
self.stack[-1].indent) <= self.column_limit
_COMPOUND_STMTS = frozenset(
{'for', 'while', 'if', 'elif', 'with', 'except', 'def', 'class'})
def _IsCompoundStatement(token):
if token.value == 'async':
token = token.next_token
return token.value in _COMPOUND_STMTS
def _IsFunctionDef(token):
if token.value == 'async':
token = token.next_token
return token.value == 'def'
def _IsFunctionCallWithArguments(token):
while token:
if token.value == '(':
token = token.next_token
return token and token.value != ')'
elif token.name not in {'NAME', 'DOT', 'EQUAL'}:
break
token = token.next_token
return False
def _IsArgumentToFunction(token):
bracket = unwrapped_line.IsSurroundedByBrackets(token)
if not bracket or bracket.value != '(':
return False
previous = bracket.previous_token
return previous and previous.is_name
def _GetLengthOfSubtype(token, subtype, exclude=None):
current = token
while (current.next_token and subtype in current.subtypes and
(exclude is None or exclude not in current.subtypes)):
current = current.next_token
return current.total_length - token.total_length + 1
def _GetOpeningBracket(current):
"""Get the opening bracket containing the current token."""
if current.matching_bracket and not current.is_pseudo_paren:
return current if current.OpensScope() else current.matching_bracket
while current:
if current.ClosesScope():
current = current.matching_bracket
elif current.is_pseudo_paren:
current = current.previous_token
elif current.OpensScope():
return current
current = current.previous_token
return None
def _LastTokenInLine(current):
while not current.is_comment and current.next_token:
current = current.next_token
return current
def _IsFunctionDefinition(current):
prev = current.previous_token
return (current.value == '(' and prev and
format_token.Subtype.FUNC_DEF in prev.subtypes)
def _IsLastScopeInLine(current):
current = current.matching_bracket
while current:
current = current.next_token
if current and current.OpensScope():
return False
return True
def _IsSingleElementTuple(token):
"""Check if it's a single-element tuple."""
close = token.matching_bracket
token = token.next_token
num_commas = 0
while token != close:
if token.value == ',':
num_commas += 1
if token.OpensScope():
token = token.matching_bracket
else:
token = token.next_token
return num_commas == 1
def _ScopeHasNoCommas(token):
"""Check if the scope has no commas."""
close = token.matching_bracket
token = token.next_token
while token != close:
if token.value == ',':
return False
if token.OpensScope():
token = token.matching_bracket
else:
token = token.next_token
return True
class _ParenState(object):
"""Maintains the state of the bracket enclosures.
  A stack of _ParenState objects is kept so that we know how to indent relative
to the brackets.
Attributes:
indent: The column position to which a specified parenthesis level needs to
be indented.
last_space: The column position of the last space on each level.
split_before_closing_bracket: Whether a newline needs to be inserted before
the closing bracket. We only want to insert a newline before the closing
bracket if there also was a newline after the beginning left bracket.
num_line_splits: Number of line splits this _ParenState contains already.
Each subsequent line split gets an increasing penalty.
"""
# TODO(morbo): This doesn't track "bin packing."
def __init__(self, indent, last_space):
self.indent = indent
self.last_space = last_space
self.closing_scope_indent = 0
self.split_before_closing_bracket = False
self.num_line_splits = 0
def Clone(self):
state = _ParenState(self.indent, self.last_space)
state.closing_scope_indent = self.closing_scope_indent
state.split_before_closing_bracket = self.split_before_closing_bracket
state.num_line_splits = self.num_line_splits
return state
def __repr__(self):
return '[indent::%d, last_space::%d, closing_scope_indent::%d]' % (
self.indent, self.last_space, self.closing_scope_indent)
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return not self == other
def __hash__(self, *args, **kwargs):
return hash((self.indent, self.last_space, self.closing_scope_indent,
self.split_before_closing_bracket, self.num_line_splits))
|
the-stack_106_31699 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
SELF_LINK = '$(ref.{}.selfLink)'
PROP = '$(ref.{}.{})'
def gen_instance_template(name, instance_template):
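    # Wrap the user-supplied instance template properties in a
    # compute.v1.instanceTemplate resource.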
return {
'name': name + '-instance-template',
'type': 'compute.v1.instanceTemplate',
'properties': {'properties': instance_template}
}
def gen_health_check(name, health_check_template, port, ssl=False):
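    # Build a health check on the given port; `ssl` selects httpsHealthCheck
    # instead of httpHealthCheck.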
return {
'name': name + '-health-check',
'type': 'compute.v1.{}'.format(
'httpsHealthCheck' if ssl else 'httpHealthCheck'
),
'properties': dict(
port=port,
**health_check_template
)
}
def gen_target_proxy(name, url_map, cert=None):
properties = {
'urlMap': SELF_LINK.format(url_map['name']),
'description': (
            'proxies from the forwarding rule '
            'to the instance group manager'
)
}
if cert:
properties['sslCertificates'] = [SELF_LINK.format(cert['name'])]
return {
'name': name + '-target-proxy-ssl',
'type': 'compute.v1.targetHttpsProxy',
'properties': properties
}
else:
return {
'name': name + '-target-proxy',
'type': 'compute.v1.targetHttpProxy',
'properties': properties
}
def gen_firewall_rule(name, port, instance_template):
firewall_rule = {
'name': name + '-firewall-rule',
'type': 'compute.v1.firewall',
'properties': {
'allowed': [{'IPProtocol': 'tcp', 'ports': [port]}],
'sourceRanges': ['0.0.0.0/0'],
}
}
if 'tags' in instance_template['properties']:
firewall_rule['properties']['targetTags'] = instance_template[
'properties']['tags']
return firewall_rule
def gen_target_pool(name, region, health_checks):
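    # Build a regional target pool referencing the given health checks;
    # instances are added to it automatically by the instance group managers.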
return {
'name': name + '-target-pool-' + region,
'type': 'compute.v1.targetPool',
'properties': {
'region': region,
'healthChecks': [
SELF_LINK.format(health_check['name'])
for health_check in health_checks
],
'instances': [],
'description': (
                'A target pool to provide health checks '
                'to individual instances serving APIs. '
                'Does not serve any forwarding rules. '
                'Instances are auto-added by IGMs.'
),
'sessionAffinity': 'NONE'
}
}
def gen_global_forwarding_rule(name, target_proxy, port, ip_address=None):
forwarding_rule = {
        'name': name + '-forwarding-rule-' + target_proxy['name'],
'type': 'compute.v1.globalForwardingRule',
'properties': {
'target': SELF_LINK.format(target_proxy['name']),
'portRange': port,
'IPProtocol': 'TCP',
}
}
if ip_address:
forwarding_rule['properties']['IPAddress'] = ip_address
return forwarding_rule
def gen_url_map(name, backend_service, dns_name=''):
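    # Route every host and path to the single backend service.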
service_link = SELF_LINK.format(backend_service['name'])
return {
'name': name + '-url-map',
'type': 'compute.v1.urlMap',
'properties': {
'defaultService': service_link,
'hostRules': [{
'description': (
                    'Route all traffic from the appropriate '
                    'DNS address to your backend'
),
'hosts': ['*.' + dns_name],
'pathMatcher': 'all'
}],
'pathMatchers': [{
'description': 'all paths',
'name': 'all',
'defaultService': service_link,
'pathRules': [{
'paths': ['/*'],
'service': service_link
}]
}]
}
}
def gen_backend_service(name,
backend_template,
igms,
health_check=None,
https=False):
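    # One backend entry per managed instance group, all served over the
    # named port 'api-port'.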
backends = [
dict(
group=PROP.format(igm['name'], 'instanceGroup'),
**backend_template
) for igm in igms
]
backend_service = {
'name': name + '-backend-service',
'type': 'compute.v1.backendService',
'properties': {
'backends': backends,
'portName': 'api-port',
'protocol': 'HTTPS' if https else 'HTTP',
}
}
if health_check:
backend_service['properties']['healthChecks'] = [
SELF_LINK.format(health_check['name']),
]
return backend_service
def gen_instance_group_manager(name,
zone,
port,
instance_template,
target_pool=None):
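    # Create a zonal managed instance group of size 1 that exposes the API
    # port as a named port and registers with the regional target pool
    # when one is given.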
return {
'name': name + '-igm-' + zone,
'type': 'compute.v1.instanceGroupManager',
'properties': {
'baseInstanceName': instance_template['name'],
'instanceTemplate': SELF_LINK.format(instance_template['name']),
'targetSize': 1,
'zone': zone,
'targetPools': [
SELF_LINK.format(target_pool['name'])
] if target_pool else [],
'namedPorts': [{
'name': 'api-port',
'port': port
}]
}
}
def gen_autoscaler(name, autoscaler_template, instance_group_manager):
return {
'name': name + instance_group_manager['name'] + '-autoscaler',
'type': 'compute.v1.autoscaler',
'properties': dict(
zone=instance_group_manager['properties']['zone'],
target=SELF_LINK.format(instance_group_manager['name']),
**autoscaler_template
)
}
def GenerateConfig(context):
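    # Deployment Manager entry point: assemble all resources for the
    # deployment and return them as a YAML string.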
name = context.env['deployment']
resources = []
instance_template = gen_instance_template(
name,
context.properties['instance_template']
)
resources.append(instance_template)
cert = context.properties.get('ssl_certificate')
port = context.properties.get('port', 443 if cert else 8080)
health_check_template = context.properties.get('health_check')
# Create a health Check
health_check = gen_health_check(
name, health_check_template, port, ssl=cert)
resources.append(health_check)
# Create a target pool using the health check in each region
regions = {
zone: zone.rsplit('-', 1)[0]
for zone in context.properties['zones']
}
target_pools = dict()
for region in set(regions.values()):
target_pool = gen_target_pool(name, region, [health_check])
target_pools[region] = target_pool
resources.append(target_pool)
# Create a managed instance group in each zone
igms = []
for zone in context.properties['zones']:
igm = gen_instance_group_manager(
name,
zone,
port,
instance_template,
target_pool=target_pools[regions[zone]]
)
igms.append(igm)
resources.append(igm)
# Optionally autoscale those managed instance groups
autoscaler_template = context.properties.get('autoscaler')
if autoscaler_template:
for igm in igms:
autoscaler = gen_autoscaler(name, autoscaler_template, igm)
resources.append(autoscaler)
# A backend service that load balances across all the IGMs
backend_service = gen_backend_service(
name,
context.properties['backend_service_template'],
igms,
health_check=health_check,
https=cert
)
resources.append(backend_service)
# A trivial URL Map that only maps to the single backend service
url_map = gen_url_map(
name, backend_service, dns_name=context.properties.get('dns_name', ''))
resources.append(url_map)
    # A target proxy that connects the forwarding rule and the URL map
target_proxy = gen_target_proxy(name, url_map, cert=cert)
resources.append(target_proxy)
# A forwarding rule that uses the provided static IP
forwarding_rule = gen_global_forwarding_rule(
name,
target_proxy,
port,
ip_address=context.properties.get('ip_address')
)
resources.append(forwarding_rule)
firewall_rule = gen_firewall_rule(name, port, instance_template)
resources.append(firewall_rule)
return yaml.dump({'resources': resources})
|
the-stack_106_31701 | #!/usr/bin/env python
#
# NLP
# Andrew D'Amico
# MSDS 453 Natural Language Processing
# Northwestern University
# Copyright (c) 2022, Andrew D'Amico. All rights reserved.
# Licenced under BSD Licence.
import datetime
from NLPPrep import tokenization
import math
class NewsArticle(object):
"""
A News Article is an object which contains a headline, a synopsis,
keywords, and meta data for processing.
    headline: Takes a string, generally in the form of a sentence
    date: string in '%Y-%m-%d' format
    synopsis: string
    keywords: list of keywords
"""
def __init__(self, headline: str = None, date: str = None, synopsis: str = None,
keywords: list = None):
self._headline = headline
self._date = datetime.datetime.strptime(date, '%Y-%m-%d')
self._synopsis = synopsis
self._keywords = keywords
self._tokens = None
self._dsi = None
self._embeddings = None
@property
def headline(self):
return self._headline
@property
def date(self):
return self._date
@property
def synopsis(self):
return self._synopsis
@property
def keywords(self):
return self._keywords
@property
def tokens(self):
if self._tokens:
pass
else:
self._tokens = self.build_tokens()
return self._tokens
@property
def dsi(self):
        if self._dsi is None:
self._dsi = tokenization(self.headline)
return self._dsi
@property
def embeddings(self):
return self._embeddings
@embeddings.setter
def embeddings(self, data):
self._embeddings = data
def build_tokens(self):
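        # Tokenize the headline (and the synopsis, when present) and join the
        # resulting tokens into a single space-separated string.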
container = " "
try:
if self.headline:
container = tokenization(self.headline)
pass
except:
print ("Cant tokenize headline")
try:
if self.synopsis:
container += tokenization(self.synopsis)
else:
pass
except:
if math.isnan(self.synopsis):
self._synopsis = " "
else:
pass
        # Join the tokens back into a single space-separated string.
        temp_DSI = ' '.join(container)
return temp_DSI
class Corpus(object):
"""
A Corpus is a collection of News Articles of the class NewsArticles
"""
def __init__(self, db = None):
self._corpus = self.build_corpus(db, parser=NewsArticle)
@property
def corpus(self):
return self._corpus
def build_corpus(self, db, parser=NewsArticle):
corpus = [(
parser(
headline=row.headline,
date=row.date,
synopsis=row.snippet,
keywords=row.keywords
)) for index, row in db.iterrows()
]
return corpus
|
the-stack_106_31702 | # -*- coding: utf-8 -*-
import os.path
from ..decorators import linter
from ..parsers.base import ParserBase
from ..util.system import JAVA_SEP, vendored_path
GROOVY_PATH = vendored_path(os.path.join("groovy", "groovy-all-2.4.15.jar"))
SLF4J_PATH = vendored_path(os.path.join("groovy", "slf4j-api-1.7.25.jar"))
CODENARC_PATH = vendored_path(os.path.join("codenarc", "CodeNarc-1.2.1.jar"))
@linter(
name="codenarc",
install=[],
help_cmd=[
"java",
"-classpath",
"{}{}{}{}{}{}{}{}{}".format(
GROOVY_PATH,
JAVA_SEP,
CODENARC_PATH,
JAVA_SEP,
SLF4J_PATH,
JAVA_SEP,
vendored_path("codenarc"),
JAVA_SEP,
".",
),
"org.codenarc.CodeNarc",
"-help",
],
run=[
"java",
"-classpath",
"{}{}{}{}{}{}{}{}{}".format(
GROOVY_PATH,
JAVA_SEP,
CODENARC_PATH,
JAVA_SEP,
SLF4J_PATH,
JAVA_SEP,
vendored_path("codenarc"),
JAVA_SEP,
".",
),
"org.codenarc.CodeNarc",
"-includes=**/*.groovy,**/Jenkinsfile,**/jenkinsfile,**/...groovy",
"-report=console",
"-rulesetfiles={}".format(os.path.join(os.getcwd(), "codenarc.xml")),
],
rundefault=[
"java",
"-classpath",
"{}{}{}{}{}{}{}{}{}".format(
GROOVY_PATH,
JAVA_SEP,
CODENARC_PATH,
JAVA_SEP,
SLF4J_PATH,
JAVA_SEP,
vendored_path("codenarc"),
JAVA_SEP,
".",
),
"org.codenarc.CodeNarc",
"-includes=**/*.groovy,**/Jenkinsfile,**/jenkinsfile,**/...groovy",
"-report=console",
"-rulesetfiles=codenarc.xml",
],
dotfiles=["codenarc.xml"],
language="groovy",
autorun=True,
run_per_file=False,
)
class CodenarcParser(ParserBase):
"""Parse Codenarc output."""
def parse(self, lint_data):
messages = set()
path = ""
msg = ""
line_no = -1
for line in lint_data.split("\n"):
try:
if line.strip().startswith("File:"):
path = line.split("File:")[-1].strip()
continue
if line.strip().startswith("Violation:"):
parts = line.strip().split()
line_no = int(parts[3].split("=")[-1])
msg = line.strip()
else:
msg += "\n" + line
if "Src=" in line:
messages.add((path, line_no, msg))
msg = ""
except (ValueError, IndexError, TypeError):
print("Invalid message: {0}".format(line))
return messages
|
the-stack_106_31705 | from joblib import delayed, Parallel
import os
import sys
import glob
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
plt.switch_backend('agg')
def extract_video_opencv(v_path, f_root, dim=240):
'''v_path: single video path;
f_root: root to store frames'''
v_class = v_path.split('/')[-2]
v_name = os.path.basename(v_path)[0:-4]
out_dir = os.path.join(f_root, v_class, v_name)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
vidcap = cv2.VideoCapture(v_path)
nb_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
width = vidcap.get(cv2.CAP_PROP_FRAME_WIDTH) # float
height = vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
if (width == 0) or (height==0):
print(v_path, 'not successfully loaded, drop ..'); return
new_dim = resize_dim(width, height, dim)
success, image = vidcap.read()
count = 1
while success:
image = cv2.resize(image, new_dim, interpolation = cv2.INTER_LINEAR)
cv2.imwrite(os.path.join(out_dir, 'image_%05d.jpg' % count), image,
[cv2.IMWRITE_JPEG_QUALITY, 80])# quality from 0-100, 95 is default, high is good
success, image = vidcap.read()
count += 1
if nb_frames > count:
print('/'.join(out_dir.split('/')[-2::]), 'NOT extracted successfully: %df/%df' % (count, nb_frames))
vidcap.release()
def resize_dim(w, h, target):
'''resize (w, h), such that the smaller side is target, keep the aspect ratio'''
if w >= h:
return (int(target * w / h), int(target))
else:
return (int(target), int(target * h / w))
def main_UCF101(v_root, f_root):
print('extracting UCF101 ... ')
print('extracting videos from %s' % v_root)
print('frame save to %s' % f_root)
if not os.path.exists(f_root): os.makedirs(f_root)
v_act_root = glob.glob(os.path.join(v_root, '*/'))
for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
v_paths = glob.glob(os.path.join(j, '*.avi'))
v_paths = sorted(v_paths)
Parallel(n_jobs=4)(delayed(extract_video_opencv)(p, f_root) for p in tqdm(v_paths, total=len(v_paths)))
def main_HMDB51(v_root, f_root):
print('extracting HMDB51 ... ')
print('extracting videos from %s' % v_root)
print('frame save to %s' % f_root)
if not os.path.exists(f_root): os.makedirs(f_root)
v_act_root = glob.glob(os.path.join(v_root, '*/'))
for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
v_paths = glob.glob(os.path.join(j, '*.avi'))
v_paths = sorted(v_paths)
action_f_root = f_root + j.split('\\')[1] + '/'
Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, action_f_root) for p in tqdm(v_paths, total=len(v_paths)))
def main_kinetics400(v_root, f_root, dim=150):
print('extracting Kinetics400 ... ')
for basename in ['train_split', 'val_split']:
v_root_real = v_root + '/' + basename
if not os.path.exists(v_root_real):
print('Wrong v_root'); sys.exit()
f_root_real = '/scratch/local/ssd/htd/kinetics400/frame_full' + '/' + basename
print('Extract to: \nframe: %s' % f_root_real)
if not os.path.exists(f_root_real): os.makedirs(f_root_real)
v_act_root = glob.glob(os.path.join(v_root_real, '*/'))
v_act_root = sorted(v_act_root)
# if resume, remember to delete the last video folder
for i, j in tqdm(enumerate(v_act_root), total=len(v_act_root)):
v_paths = glob.glob(os.path.join(j, '*.mp4'))
v_paths = sorted(v_paths)
# for resume:
v_class = j.split('/')[-2]
out_dir = os.path.join(f_root_real, v_class)
if os.path.exists(out_dir): print(out_dir, 'exists!'); continue
print('extracting: %s' % v_class)
# dim = 150 (crop to 128 later) or 256 (crop to 224 later)
Parallel(n_jobs=32)(delayed(extract_video_opencv)(p, f_root_real, dim=dim) for p in tqdm(v_paths, total=len(v_paths)))
if __name__ == '__main__':
# v_root is the video source path, f_root is where to store frames
# edit 'your_path' here:
# main_UCF101(v_root='D:/VideoData/UCF101/videos',
# f_root='D:/VideoData/UCF101/frame')
main_HMDB51(v_root='D:/VideoData/HMDB51/videos',
f_root='D:/VideoData/HMDB51/frames/')
|
the-stack_106_31706 | from tkinter import *
from tkinter import filedialog
from styles import *
from downloader import Downloader
class Singleton(type):
"""
Acts as a metaclass to allow other classes to become Singleton classes
"""
_instances = {}
def __call__(cls, *args, **kwargs):
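        # Create the instance on first use and return the cached instance on
        # every later call.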
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class SaveLocationForm(metaclass=Singleton):
def __init__(self, window) -> None:
self.file_entry_label = Label(
window,
text="Download Location",
bg=BKG_COLOR,
fg=FG_COLOR,
font=label_style
)
self.file_entry = Entry(
window,
bg=BKG_COLOR,
fg=FG_COLOR,
font=entry_style,
width=30
)
self.browse_btn = Button(
window,
image=browse_btn,
command=self.browse_func,
width=47,
height=47,
borderwidth=0,
relief='flat',
highlightthickness=0,
bd=0,
)
self.position()
def position(self):
self.file_entry_label.grid(row = 1, columnspan = 2, padx=(25, 0), pady=(30,10), sticky="W")
self.file_entry.grid(row = 2, column = 0, padx=(25, 0), pady=0, sticky="W")
self.browse_btn.grid(row = 2, column = 1, padx=(10, 0), pady=0, sticky="W")
def browse_func(self):
"""
Called on button press
Popup to allow users to find a directory on their system
"""
self.file_entry.delete(0, END)
filename = filedialog.askdirectory()
self.file_entry.insert(END, filename)
def get_path(self):
"""
Called in the DownloadForm
Gets the path the user wants to save the videos in
"""
return self.file_entry.get()
class YouTubeVideoForm(metaclass=Singleton):
def __init__(self, window) -> None:
self.video_link_label = Label(
window,
text="YouTube Video Link",
bg=BKG_COLOR,
fg=FG_COLOR,
font=label_style
)
self.link_entry = Entry(
window,
bg=BKG_COLOR,
fg=FG_COLOR,
font= entry_style,
width= 30
)
self.link_entry_btn = Button(
window,
image=add_btn,
command=self.add_link,
width=47,
height=47,
borderwidth=0,
relief='flat',
highlightthickness=0,
bd=0,
)
# Column 2
self.video_list_label = Label(
window,
text="Selected Videos",
bg=BKG_COLOR,
fg=FG_COLOR,
font=label_style
)
self.links_entry_delete_btn = Button(
window,
image=delete_btn,
command=self.delete_link,
width=47,
height=47,
borderwidth=0,
relief='flat',
highlightthickness=0,
bd=0,
)
self.links_list_clear_btn = Button(
window,
image=clear_list_btn,
command=self.clear_list,
width=47,
height=47,
borderwidth=0,
relief='flat',
highlightthickness=0,
bd=0,
)
self.links_list = Listbox(
window,
width=47,
height=20,
fg=BKG_COLOR,
bg=FG_COLOR,
)
self.position()
def position(self):
self.video_link_label.grid(row=3, columnspan=2, padx=(25, 0), pady=(25,10), sticky="W")
self.link_entry.grid(row=4, column=0, padx=(25,0), pady=0, sticky="W")
self.link_entry_btn.grid(row=4, column=1, padx=(10,0), pady=0, sticky="W")
# Column 2
self.video_list_label.grid(row=1, column=3, padx=(60,0), pady=(25,10), sticky="W")
self.links_entry_delete_btn.grid(row=1, column=4, pady=(25,10), sticky="E")
self.links_list_clear_btn.grid(row=1, column=5, pady=(25,10), sticky="E")
self.links_list.grid(row=2, rowspan=10, column=3, columnspan=3, padx=(60,0), pady=(0,10), sticky="W")
def add_link(self):
"""
Called on button press
Checks if Youtube Link and adds them to the ListBox
"""
if "https://www.youtube.com/watch?v=" in self.link_entry.get():
self.links_list.insert("end", self.link_entry.get())
self.link_entry.delete(0, "end")
else:
# TODO - Add error popup to remind user to upload YOUTUBE LINKS
pass
def delete_link(self):
"""
Called on button press
Deletes the link the User has selected
"""
self.links_list.delete(self.links_list.curselection()[0])
def clear_list(self):
"""
Called on button press
Deletes the entire ListBox of YT Video Links
"""
self.links_list.delete(0, END)
def get_list(self):
"""
Called in DownloadForm
Gets the entire ListBox of links (works bcs of Singleton)
"""
return list(self.links_list.get(first=0, last=END))
def remove_from_list(self, vid):
"""
Called in DownloadForm
Finds the first occurence of a link and removes it from the ListBox
"""
idx = self.links_list.get(0, END).index(vid)
self.links_list.delete(idx)
class DownloadForm(metaclass=Singleton):
def __init__(self, window):
self.format_label = Label(
window,
text="Format",
bg=BKG_COLOR,
fg=FG_COLOR,
font=label_style
)
# For the dropdown
self.vformat = StringVar(window)
self.vformat.set("mp4")
self.format_dropdown = OptionMenu(
window,
self.vformat,
"mp3",
"mp4"
)
self.format_dropdown.config(font=entry_style)
self.download_btn = Button(
window,
text = "Download!",
command = self.download,
image=download_btn,
width=450,
height=47,
borderwidth=0,
relief='flat',
highlightthickness=0,
bd=0,
)
self.position()
def position(self):
self.format_label.grid(row=5, columnspan=2, padx=(25, 0), pady=(25,10), sticky="W")
self.format_dropdown.grid(row=6, columnspan=2, padx=(25,0), pady=0, sticky="W")
self.download_btn.grid(row=7, columnspan=2, padx=(25,0), pady=(20,0), sticky="W")
def download(self):
"""
REQUIRES:
- video list from YouTubeVideoForm
- path from SaveLocationForm
Downloads all the videos in the ListBox in YoutubeVideoForm
"""
video_list = YouTubeVideoForm().get_list()
path = SaveLocationForm().get_path()
if len(path) > 0 and len(video_list) > 0:
for vid in video_list:
if Downloader.download(path, vid, self.vformat.get()) == 1:
# Download Successful
YouTubeVideoForm().remove_from_list(vid)
else:
# TODO - Display Errors
pass
else:
# TODO - Display Errors
pass |
the-stack_106_31707 | """
@Author:lichunhui
@Time:
@Description:
"""
from sqlalchemy.exc import IntegrityError as SqlalchemyIntegrityError, InternalError
from pymysql.err import IntegrityError as PymysqlIntegrityError
from sqlalchemy.exc import InvalidRequestError
from ..logger import db_logger
from .basic import get_db_session
__all__ = ['CommandOperate']
class CommandOperate:
@classmethod
def add_one(cls, data):
with get_db_session() as db_session:
try:
db_session.add(data)
db_session.commit()
except (InternalError, SqlalchemyIntegrityError, PymysqlIntegrityError, InvalidRequestError) as e:
db_session.rollback()
db_logger.error("exception '{}' happened when add data".format(e))
@classmethod
def add_all(cls, datas):
with get_db_session() as db_session:
try:
db_session.add_all(datas)
db_session.commit()
except (SqlalchemyIntegrityError, PymysqlIntegrityError, InvalidRequestError):
for data in datas:
cls.add_one(data)
|
the-stack_106_31709 | class RiskyExtention:
    @staticmethod
    def RiskyExtention():
        import os.path
        from urllib.parse import urlparse
        o = urlparse('http://www.cwi.nl:80/%7Eguido/Python.html')
        print(o)
        currentpath = o.path
        # File extensions considered risky to fetch or execute.
        testlist = [".pdf", ".exe", ".mp3", ".mp4", ".bin", ".zip", ".gif", ".jpg",
                    ".ps1", ".bat", ".ps", ".jar", ".txt", ".rar", ".avi", ".mov"]
        # Compare the URL path's extension (e.g. '.html') against the risky list.
        extension = os.path.splitext(currentpath)[1].lower()
        return extension in testlist
x = RiskyExtention.RiskyExtention()
print(x)
|
the-stack_106_31710 | import collections
import logging
import threading
import time
import pytest
import six
from kafka import SimpleClient
from kafka.conn import ConnectionStates
from kafka.consumer.group import KafkaConsumer
from kafka.structs import TopicPartition
from test.conftest import version
from test.testutil import random_string
def get_connect_str(kafka_broker):
return 'localhost:' + str(kafka_broker.port)
@pytest.fixture
def simple_client(kafka_broker):
return SimpleClient(get_connect_str(kafka_broker))
@pytest.fixture
def topic(simple_client):
topic = random_string(5)
simple_client.ensure_topic_exists(topic)
return topic
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_consumer(kafka_broker, version):
# 0.8.2 brokers need a topic to function well
if version >= (0, 8, 2) and version < (0, 9):
topic(simple_client(kafka_broker))
consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
consumer.poll(500)
assert len(consumer._client._conns) > 0
node_id = list(consumer._client._conns.keys())[0]
assert consumer._client._conns[node_id].state is ConnectionStates.CONNECTED
@pytest.mark.skipif(version() < (0, 9), reason='Unsupported Kafka Version')
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_group(kafka_broker, topic):
num_partitions = 4
connect_str = get_connect_str(kafka_broker)
consumers = {}
stop = {}
threads = {}
    messages = collections.defaultdict(lambda: collections.defaultdict(list))
def consumer_thread(i):
assert i not in consumers
assert i not in stop
stop[i] = threading.Event()
consumers[i] = KafkaConsumer(topic,
bootstrap_servers=connect_str,
heartbeat_interval_ms=500)
while not stop[i].is_set():
            for tp, records in six.iteritems(consumers[i].poll(100)):
messages[i][tp].extend(records)
consumers[i].close()
del consumers[i]
del stop[i]
num_consumers = 4
for i in range(num_consumers):
t = threading.Thread(target=consumer_thread, args=(i,))
t.start()
threads[i] = t
try:
timeout = time.time() + 35
while True:
for c in range(num_consumers):
# Verify all consumers have been created
if c not in consumers:
break
# Verify all consumers have an assignment
elif not consumers[c].assignment():
break
# If all consumers exist and have an assignment
else:
# Verify all consumers are in the same generation
# then log state and break while loop
generations = set([consumer._coordinator.generation
for consumer in list(consumers.values())])
if len(generations) == 1:
for c, consumer in list(consumers.items()):
logging.info("[%s] %s %s: %s", c,
consumer._coordinator.generation,
consumer._coordinator.member_id,
consumer.assignment())
break
assert time.time() < timeout, "timeout waiting for assignments"
group_assignment = set()
for c in range(num_consumers):
assert len(consumers[c].assignment()) != 0
assert set.isdisjoint(consumers[c].assignment(), group_assignment)
group_assignment.update(consumers[c].assignment())
assert group_assignment == set([
TopicPartition(topic, partition)
for partition in range(num_partitions)])
finally:
for c in range(num_consumers):
stop[c].set()
threads[c].join()
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_paused(kafka_broker, topic):
consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
topics = [TopicPartition(topic, 1)]
consumer.assign(topics)
assert set(topics) == consumer.assignment()
assert set() == consumer.paused()
consumer.pause(topics[0])
assert set([topics[0]]) == consumer.paused()
consumer.resume(topics[0])
assert set() == consumer.paused()
consumer.unsubscribe()
assert set() == consumer.paused()
def test_heartbeat_timeout(conn, mocker):
    mocker.patch('kafka.client_async.KafkaClient.check_version', return_value='0.9')
    mocker.patch('time.time', return_value=1234)
    consumer = KafkaConsumer('foobar')
    mocker.patch.object(consumer._coordinator.heartbeat, 'ttl', return_value=0)
    assert consumer._next_timeout() == 1234
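# --- Hedged sketch (not part of the original test module) ---
# A minimal standalone poll loop mirroring the consume pattern exercised above;
# the bootstrap address and topic name are illustrative assumptions only.
#
#     consumer = KafkaConsumer('my-topic', bootstrap_servers='localhost:9092')
#     try:
#         while True:
#             for tp, records in six.iteritems(consumer.poll(100)):
#                 for record in records:
#                     print(tp, record.offset, record.value)
#     finally:
#         consumer.close()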
|
the-stack_106_31712 | import tweepy
from site_crawler.twitter.credentials import Credentials
from site_crawler.cleaner.cleaner import Cleaner
import csv
import pandas as pd
from sklearn.externals import joblib
model=joblib.load('model.pkl')
credentials = Credentials()
cleaner = Cleaner()
api = credentials.authentinticate_twitter()
def predict(text2):
    # Reuse the model that was already loaded at module level instead of
    # reloading it from disk on every call
    prediction = model.predict(text2)
    return prediction[0]
text2 = [
"the world's smallest disneyland has posted losses for 9 of the 12 years since it opened. local visitors make up 41%… ",
"kenya's economy struggles",
"loss making venture",
"Uchumi",
"nakumatt",
"Centum ",
"use becomes a public limited company"
]
query = 'safaricom'
max_tweets = 10
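# Pull the most recent tweets matching the query, then clean and label each one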
searched_tweets = [status for status in tweepy.Cursor(api.search, q=query).items(max_tweets)]
outtweets = [[cleaner.clean_tweets(tweet.text),predict([cleaner.clean_tweets(tweet.text)])] for tweet in searched_tweets]
# print(outtweets)
#
# exit()
# for tweets in outtweets:
# print([tweets])
with open('./predict.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(["text", "label"])
writer.writerows(outtweets)
pass
# df=pd.read_csv("./predict.csv")
# df=df.dropna(how='any')
# df=df.drop_duplicates()
# model=joblib.load("model.pkl")
# print(df.text)
# df['label'] = model.predict(df.text)
# print(df.label)
# df.to_csv("predicted.csv",encoding="utf8")
# #print(model.predict(df.text))
# print("read") |
the-stack_106_31719 | #! usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import urllib.error
import time
import os
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup
from selenium import webdriver
import datetime
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import sys
import selenium.webdriver.support.ui as ui
import socket
import selenium.webdriver.phantomjs
f=open('url.txt','w',encoding='utf-8')
# Download the picture from the resulting picture link and save it
def SaveImage(link, InputData, count):
    try:
        urllib.request.urlretrieve(link, './' + InputData + '/' + str(count) + '.jpg')
    except urllib.error.HTTPError as urllib_err:
        print(urllib_err)
    except Exception as err:
        print(err)
        print("Unknown error occurred, giving up on saving this image")
    else:
        print("Saved picture, now have " + str(count) + " pictures")
        print('Downloading picture ' + str(count + 1))
#Find the link to the picture
def FindLink(InputData,word):
url = 'https://www.google.com.hk/search?q=%s&newwindow=1&safe=strict&source=lnms&tbm=isch&sa=X&ved=0ahUKEwir1MTc6fnWAhWJjJQKHXfECE4Q_AUICigB&biw=1440&bih=769' % InputData
driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver') # This is a necessary Google driver for Mac
#driver = webdriver.Chrome(executable_path='C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe') # This is a necessary Google driver for Windowns
driver.get(url)
for i in range(5):
print(i)
try:
temp = driver.find_element_by_xpath('//*[@id="smb"]')
temp.click()
        except Exception:
            print("Could not find the 'show more' button")
time.sleep(5)
driver.implicitly_wait(5)
js = "var q=document.documentElement.scrollTop=%d" % 100000
driver.execute_script(js)
soup = BeautifulSoup(driver.page_source, 'lxml')
if not os.path.exists("./" + word):
os.mkdir('./' + word)
for http in soup.select('.rg_meta'):
link = eval(http.contents[0])['ou']
#print(link,file=f)
count = len(os.listdir('./' + word)) + 1
SaveImage(link,word,count)
if __name__=='__main__':
#Enter keywords to search for...
word='keywords'
InputData=urllib.parse.quote(word)
FindLink(InputData,word)
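# --- Hedged note (not part of the original script) ---
# The chromedriver paths above are machine-specific assumptions; a headless run
# could be sketched roughly as follows (illustrative, untested here):
#
#     from selenium.webdriver.chrome.options import Options
#     options = Options()
#     options.add_argument('--headless')
#     driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver', options=options)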
|